-rw-r--r--Documentation/ABI/obsolete/sysfs-class-rfkill29
-rw-r--r--Documentation/ABI/stable/sysfs-class-rfkill67
-rw-r--r--Documentation/feature-removal-schedule.txt39
-rw-r--r--Documentation/networking/caif/Linux-CAIF.txt212
-rw-r--r--Documentation/networking/caif/README109
-rw-r--r--Documentation/networking/l2tp.txt247
-rw-r--r--Documentation/rfkill.txt44
-rw-r--r--MAINTAINERS9
-rw-r--r--arch/arm/mach-pxa/icontrol.c9
-rw-r--r--arch/arm/mach-pxa/zeus.c4
-rw-r--r--arch/microblaze/include/asm/system.h11
-rw-r--r--drivers/atm/atmtcp.c6
-rw-r--r--drivers/atm/eni.c2
-rw-r--r--drivers/atm/he.c4
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c7
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c15
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c14
-rw-r--r--drivers/net/3c503.c42
-rw-r--r--drivers/net/3c505.c7
-rw-r--r--drivers/net/3c523.c7
-rw-r--r--drivers/net/3c527.c6
-rw-r--r--drivers/net/3c59x.c4
-rw-r--r--drivers/net/7990.c6
-rw-r--r--drivers/net/8139cp.c6
-rw-r--r--drivers/net/8139too.c6
-rw-r--r--drivers/net/82596.c6
-rw-r--r--drivers/net/Kconfig25
-rw-r--r--drivers/net/Makefile3
-rw-r--r--drivers/net/a2065.c6
-rw-r--r--drivers/net/acenic.c42
-rw-r--r--drivers/net/acenic.h6
-rw-r--r--drivers/net/amd8111e.c6
-rw-r--r--drivers/net/arm/am79c961a.c6
-rw-r--r--drivers/net/arm/at91_ether.c6
-rw-r--r--drivers/net/arm/ixp4xx_eth.c8
-rw-r--r--drivers/net/arm/ks8695net.c10
-rw-r--r--drivers/net/arm/w90p910_ether.c1
-rw-r--r--drivers/net/at1700.c6
-rw-r--r--drivers/net/atl1c/atl1c_main.c7
-rw-r--r--drivers/net/atl1e/atl1e_main.c9
-rw-r--r--drivers/net/atlx/atl1.c4
-rw-r--r--drivers/net/atlx/atl2.c7
-rw-r--r--drivers/net/atlx/atlx.c6
-rw-r--r--drivers/net/atp.c6
-rw-r--r--drivers/net/au1000_eth.c259
-rw-r--r--drivers/net/au1000_eth.h4
-rw-r--r--drivers/net/b44.c6
-rw-r--r--drivers/net/bcm63xx_enet.c12
-rw-r--r--drivers/net/benet/be.h11
-rw-r--r--drivers/net/benet/be_cmds.c14
-rw-r--r--drivers/net/benet/be_cmds.h2
-rw-r--r--drivers/net/benet/be_ethtool.c2
-rw-r--r--drivers/net/benet/be_hw.h3
-rw-r--r--drivers/net/benet/be_main.c305
-rw-r--r--drivers/net/bfin_mac.c6
-rw-r--r--drivers/net/bmac.c12
-rw-r--r--drivers/net/bnx2.c45
-rw-r--r--drivers/net/bnx2.h6
-rw-r--r--drivers/net/bnx2x.h64
-rw-r--r--drivers/net/bnx2x_link.c12
-rw-r--r--drivers/net/bnx2x_main.c1846
-rw-r--r--drivers/net/bnx2x_reg.h27
-rw-r--r--drivers/net/bonding/bond_ipv6.c9
-rw-r--r--drivers/net/bonding/bond_main.c176
-rw-r--r--drivers/net/bonding/bonding.h2
-rw-r--r--drivers/net/caif/Kconfig17
-rw-r--r--drivers/net/caif/Makefile12
-rw-r--r--drivers/net/caif/caif_serial.c446
-rw-r--r--drivers/net/can/at91_can.c3
-rw-r--r--drivers/net/can/bfin_can.c1
-rw-r--r--drivers/net/can/mcp251x.c15
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c1
-rw-r--r--drivers/net/can/mscan/mscan.c1
-rw-r--r--drivers/net/can/sja1000/Kconfig4
-rw-r--r--drivers/net/can/sja1000/ems_pci.c1
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c1
-rw-r--r--drivers/net/can/sja1000/plx_pci.c154
-rw-r--r--drivers/net/can/sja1000/sja1000.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_isa.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c1
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c45
-rw-r--r--drivers/net/can/ti_hecc.c1
-rw-r--r--drivers/net/cassini.c12
-rw-r--r--drivers/net/chelsio/pm3393.c7
-rw-r--r--drivers/net/chelsio/sge.c58
-rw-r--r--drivers/net/cpmac.c16
-rw-r--r--drivers/net/cris/eth_v10.c6
-rw-r--r--drivers/net/cxgb3/sge.c20
-rw-r--r--drivers/net/cxgb3/xgmac.c8
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c8
-rw-r--r--drivers/net/davinci_emac.c7
-rw-r--r--drivers/net/declance.c6
-rw-r--r--drivers/net/defxx.c6
-rw-r--r--drivers/net/depca.c6
-rw-r--r--drivers/net/dl2k.c6
-rw-r--r--drivers/net/dm9000.c6
-rw-r--r--drivers/net/dnet.c2
-rw-r--r--drivers/net/e100.c189
-rw-r--r--drivers/net/e1000/e1000_hw.c4
-rw-r--r--drivers/net/e1000/e1000_main.c11
-rw-r--r--drivers/net/e1000e/82571.c2
-rw-r--r--drivers/net/e1000e/e1000.h24
-rw-r--r--drivers/net/e1000e/ich8lan.c6
-rw-r--r--drivers/net/e1000e/lib.c21
-rw-r--r--drivers/net/e1000e/netdev.c202
-rw-r--r--drivers/net/e1000e/param.c16
-rw-r--r--drivers/net/eepro.c10
-rw-r--r--drivers/net/eexpress.c6
-rw-r--r--drivers/net/ehea/ehea_main.c16
-rw-r--r--drivers/net/enic/cq_enet_desc.h12
-rw-r--r--drivers/net/enic/enic.h4
-rw-r--r--drivers/net/enic/enic_main.c11
-rw-r--r--drivers/net/enic/vnic_dev.c52
-rw-r--r--drivers/net/enic/vnic_dev.h3
-rw-r--r--drivers/net/enic/vnic_rq.c4
-rw-r--r--drivers/net/enic/vnic_wq.c4
-rw-r--r--drivers/net/epic100.c6
-rw-r--r--drivers/net/ethoc.c6
-rw-r--r--drivers/net/ewrk3.c9
-rw-r--r--drivers/net/fealnx.c6
-rw-r--r--drivers/net/fec.c1138
-rw-r--r--drivers/net/fec_mpc52xx.c7
-rw-r--r--drivers/net/forcedeth.c14
-rw-r--r--drivers/net/fs_enet/mac-fcc.c6
-rw-r--r--drivers/net/fs_enet/mac-fec.c6
-rw-r--r--drivers/net/fs_enet/mac-scc.c6
-rw-r--r--drivers/net/gianfar.c191
-rw-r--r--drivers/net/gianfar.h8
-rw-r--r--drivers/net/greth.c7
-rw-r--r--drivers/net/hamachi.c8
-rw-r--r--drivers/net/hamradio/baycom_ser_fdx.c2
-rw-r--r--drivers/net/hp100.c6
-rw-r--r--drivers/net/ibm_newemac/core.c12
-rw-r--r--drivers/net/ibmlana.c8
-rw-r--r--drivers/net/ibmveth.c6
-rw-r--r--drivers/net/igb/e1000_82575.c33
-rw-r--r--drivers/net/igb/e1000_82575.h9
-rw-r--r--drivers/net/igb/e1000_defines.h1
-rw-r--r--drivers/net/igb/e1000_hw.h17
-rw-r--r--drivers/net/igb/igb.h2
-rw-r--r--drivers/net/igb/igb_ethtool.c50
-rw-r--r--drivers/net/igb/igb_main.c123
-rw-r--r--drivers/net/igbvf/netdev.c6
-rw-r--r--drivers/net/ioc3-eth.c6
-rw-r--r--drivers/net/ipg.c11
-rw-r--r--drivers/net/ipg.h109
-rw-r--r--drivers/net/irda/Kconfig6
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ali-ircc.c32
-rw-r--r--drivers/net/irda/irda-usb.c2
-rw-r--r--drivers/net/irda/sh_irda.c865
-rw-r--r--drivers/net/irda/sh_sir.c12
-rw-r--r--drivers/net/irda/vlsi_ir.c4
-rw-r--r--drivers/net/iseries_veth.c6
-rw-r--r--drivers/net/ixgb/ixgb_main.c6
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c16
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h5
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c145
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h3
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c35
-rw-r--r--drivers/net/ixgbevf/vf.c24
-rw-r--r--drivers/net/ixgbevf/vf.h4
-rw-r--r--drivers/net/jme.c6
-rw-r--r--drivers/net/korina.c12
-rw-r--r--drivers/net/ks8842.c8
-rw-r--r--drivers/net/ks8851.c94
-rw-r--r--drivers/net/ks8851_mll.c63
-rw-r--r--drivers/net/ksz884x.c83
-rw-r--r--drivers/net/lib82596.c6
-rw-r--r--drivers/net/lib8390.c6
-rw-r--r--drivers/net/ll_temac.h14
-rw-r--r--drivers/net/ll_temac_main.c157
-rw-r--r--drivers/net/lp486e.c6
-rw-r--r--drivers/net/macb.c7
-rw-r--r--drivers/net/mace.c6
-rw-r--r--drivers/net/macmace.c6
-rw-r--r--drivers/net/macvlan.c13
-rw-r--r--drivers/net/macvtap.c4
-rw-r--r--drivers/net/mlx4/en_netdev.c53
-rw-r--r--drivers/net/mlx4/mlx4_en.h3
-rw-r--r--drivers/net/mv643xx_eth.c6
-rw-r--r--drivers/net/myri10ge/myri10ge.c54
-rw-r--r--drivers/net/natsemi.c6
-rw-r--r--drivers/net/netxen/netxen_nic.h1
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c6
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c12
-rw-r--r--drivers/net/netxen/netxen_nic_init.c164
-rw-r--r--drivers/net/netxen/netxen_nic_main.c29
-rw-r--r--drivers/net/ni52.c6
-rw-r--r--drivers/net/niu.c7
-rw-r--r--drivers/net/octeon/octeon_mgmt.c7
-rw-r--r--drivers/net/pci-skeleton.c6
-rw-r--r--drivers/net/pcmcia/3c589_cs.c286
-rw-r--r--drivers/net/pcmcia/axnet_cs.c6
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c6
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c6
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c6
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c6
-rw-r--r--drivers/net/pcnet32.c6
-rw-r--r--drivers/net/phy/bcm63xx.c8
-rw-r--r--drivers/net/phy/broadcom.c16
-rw-r--r--drivers/net/phy/cicada.c8
-rw-r--r--drivers/net/phy/davicom.c9
-rw-r--r--drivers/net/phy/et1011c.c7
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/lxt.c8
-rw-r--r--drivers/net/phy/marvell.c13
-rw-r--r--drivers/net/phy/mdio-bitbang.c60
-rw-r--r--drivers/net/phy/mdio_bus.c4
-rw-r--r--drivers/net/phy/national.c7
-rw-r--r--drivers/net/phy/phy_device.c12
-rw-r--r--drivers/net/phy/qsemi.c7
-rw-r--r--drivers/net/phy/realtek.c7
-rw-r--r--drivers/net/phy/smsc.c11
-rw-r--r--drivers/net/phy/ste10Xp.c8
-rw-r--r--drivers/net/phy/vitesse.c8
-rw-r--r--drivers/net/ppp_generic.c19
-rw-r--r--drivers/net/pppol2tp.c2680
-rw-r--r--drivers/net/ps3_gelic_net.c10
-rw-r--r--drivers/net/ps3_gelic_wireless.c69
-rw-r--r--drivers/net/qla3xxx.c66
-rw-r--r--drivers/net/qla3xxx.h8
-rw-r--r--drivers/net/qlcnic/qlcnic.h21
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c16
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h7
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c131
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c34
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c100
-rw-r--r--drivers/net/qlge/qlge.h8
-rw-r--r--drivers/net/qlge/qlge_dbg.c4
-rw-r--r--drivers/net/qlge/qlge_main.c64
-rw-r--r--drivers/net/r6040.c37
-rw-r--r--drivers/net/r8169.c180
-rw-r--r--drivers/net/s2io.c12
-rw-r--r--drivers/net/s6gmac.c1
-rw-r--r--drivers/net/sb1250-mac.c7
-rw-r--r--drivers/net/sc92031.c6
-rw-r--r--drivers/net/sfc/efx.c6
-rw-r--r--drivers/net/sis190.c6
-rw-r--r--drivers/net/sis900.c14
-rw-r--r--drivers/net/skfp/fplustm.c2
-rw-r--r--drivers/net/skfp/pcmplc.c4
-rw-r--r--drivers/net/skfp/skfddi.c12
-rw-r--r--drivers/net/skfp/srf.c2
-rw-r--r--drivers/net/skge.c14
-rw-r--r--drivers/net/sky2.c26
-rw-r--r--drivers/net/sky2.h20
-rw-r--r--drivers/net/smc911x.c19
-rw-r--r--drivers/net/smc9194.c53
-rw-r--r--drivers/net/smc91x.c10
-rw-r--r--drivers/net/smsc911x.c6
-rw-r--r--drivers/net/smsc9420.c6
-rw-r--r--drivers/net/sonic.c6
-rw-r--r--drivers/net/spider_net.c6
-rw-r--r--drivers/net/starfire.c10
-rw-r--r--drivers/net/stmmac/Makefile2
-rw-r--r--drivers/net/stmmac/common.h21
-rw-r--r--drivers/net/stmmac/dwmac100.c538
-rw-r--r--drivers/net/stmmac/dwmac100.h5
-rw-r--r--drivers/net/stmmac/dwmac1000.h12
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c33
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c336
-rw-r--r--drivers/net/stmmac/dwmac100_core.c201
-rw-r--r--drivers/net/stmmac/dwmac100_dma.c138
-rw-r--r--drivers/net/stmmac/dwmac_dma.h1
-rw-r--r--drivers/net/stmmac/dwmac_lib.c7
-rw-r--r--drivers/net/stmmac/enh_desc.c342
-rw-r--r--drivers/net/stmmac/norm_desc.c240
-rw-r--r--drivers/net/stmmac/stmmac.h10
-rw-r--r--drivers/net/stmmac/stmmac_main.c10
-rw-r--r--drivers/net/sun3_82586.c6
-rw-r--r--drivers/net/sunbmac.c6
-rw-r--r--drivers/net/sundance.c6
-rw-r--r--drivers/net/sungem.c6
-rw-r--r--drivers/net/sunhme.c12
-rw-r--r--drivers/net/sunlance.c6
-rw-r--r--drivers/net/sunqe.c6
-rw-r--r--drivers/net/sunvnet.c8
-rw-r--r--drivers/net/tc35815.c6
-rw-r--r--drivers/net/tehuti.c9
-rw-r--r--drivers/net/tg3.c839
-rw-r--r--drivers/net/tg3.h17
-rw-r--r--drivers/net/tlan.c10
-rw-r--r--drivers/net/tokenring/3c359.c112
-rw-r--r--drivers/net/tokenring/ibmtr.c12
-rw-r--r--drivers/net/tokenring/lanstreamer.c58
-rw-r--r--drivers/net/tokenring/olympic.c74
-rw-r--r--drivers/net/tokenring/tms380tr.c16
-rw-r--r--drivers/net/tsi108_eth.c16
-rw-r--r--drivers/net/tulip/de2104x.c12
-rw-r--r--drivers/net/tulip/de4x5.c14
-rw-r--r--drivers/net/tulip/dmfe.c12
-rw-r--r--drivers/net/tulip/tulip_core.c27
-rw-r--r--drivers/net/tulip/uli526x.c6
-rw-r--r--drivers/net/tulip/winbond-840.c12
-rw-r--r--drivers/net/tulip/xircom_cb.c6
-rw-r--r--drivers/net/tun.c4
-rw-r--r--drivers/net/typhoon.c6
-rw-r--r--drivers/net/ucc_geth.c10
-rw-r--r--drivers/net/usb/asix.c16
-rw-r--r--drivers/net/usb/catc.c6
-rw-r--r--drivers/net/usb/dm9601.c6
-rw-r--r--drivers/net/usb/mcs7830.c6
-rw-r--r--drivers/net/usb/smsc75xx.c6
-rw-r--r--drivers/net/usb/smsc95xx.c6
-rw-r--r--drivers/net/usb/usbnet.c15
-rw-r--r--drivers/net/via-rhine.c6
-rw-r--r--drivers/net/via-velocity.c120
-rw-r--r--drivers/net/via-velocity.h77
-rw-r--r--drivers/net/virtio_net.c57
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c7
-rw-r--r--drivers/net/vxge/vxge-config.c24
-rw-r--r--drivers/net/vxge/vxge-config.h34
-rw-r--r--drivers/net/vxge/vxge-ethtool.c5
-rw-r--r--drivers/net/vxge/vxge-main.c241
-rw-r--r--drivers/net/vxge/vxge-main.h6
-rw-r--r--drivers/net/vxge/vxge-traffic.c53
-rw-r--r--drivers/net/vxge/vxge-traffic.h50
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/pc300_tty.c2
-rw-r--r--drivers/net/wan/sdla.c2
-rw-r--r--drivers/net/wimax/i2400m/tx.c2
-rw-r--r--drivers/net/wireless/adm8211.c12
-rw-r--r--drivers/net/wireless/airo.c37
-rw-r--r--drivers/net/wireless/ath/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h2
-rw-r--r--drivers/net/wireless/ath/ar9170/eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c27
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c10
-rw-r--r--drivers/net/wireless/ath/ath.h13
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c744
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.h104
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h313
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c7
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c296
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h39
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c9
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c382
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c19
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h35
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h88
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c350
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c75
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c17
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h42
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c38
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig21
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile10
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c35
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c84
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c342
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h13
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c39
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c17
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c984
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h105
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h462
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c277
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c723
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1733
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c704
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c476
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h246
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c289
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/initvals.h250
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c157
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h40
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c60
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c26
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h16
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c319
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h126
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c117
-rw-r--r--drivers/net/wireless/ath/debug.h1
-rw-r--r--drivers/net/wireless/ath/hw.c4
-rw-r--r--drivers/net/wireless/ath/regd.c3
-rw-r--r--drivers/net/wireless/b43/b43.h1
-rw-r--r--drivers/net/wireless/b43/main.c5
-rw-r--r--drivers/net/wireless/b43/phy_n.c479
-rw-r--r--drivers/net/wireless/b43/phy_n.h21
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c22
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h37
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c48
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c188
-rw-r--r--drivers/net/wireless/ipw2x00/libipw.h14
-rw-r--r--drivers/net/wireless/ipw2x00/libipw_module.c13
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c88
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c91
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c143
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c87
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h33
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c1408
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c217
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c274
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hw.h118
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ict.c307
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c1113
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c139
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c1333
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c416
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c430
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h174
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h108
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c787
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h58
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c82
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h189
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c10
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c823
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c682
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c1074
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c178
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Makefile3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c17
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c14
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c13
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.c15
-rw-r--r--drivers/net/wireless/iwmc3200wifi/hal.h5
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c76
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/trace.h283
-rw-r--r--drivers/net/wireless/iwmc3200wifi/tx.c8
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h2
-rw-r--r--drivers/net/wireless/libertas/assoc.c22
-rw-r--r--drivers/net/wireless/libertas/debugfs.c2
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c22
-rw-r--r--drivers/net/wireless/libertas/main.c13
-rw-r--r--drivers/net/wireless/libertas/rx.c50
-rw-r--r--drivers/net/wireless/libertas/wext.c4
-rw-r--r--drivers/net/wireless/libertas_tf/main.c14
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c45
-rw-r--r--drivers/net/wireless/mwl8k.c24
-rw-r--r--drivers/net/wireless/orinoco/Kconfig13
-rw-r--r--drivers/net/wireless/orinoco/hw.c13
-rw-r--r--drivers/net/wireless/orinoco/hw.h1
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c79
-rw-r--r--drivers/net/wireless/orinoco/wext.c94
-rw-r--r--drivers/net/wireless/p54/main.c1
-rw-r--r--drivers/net/wireless/p54/p54pci.c26
-rw-r--r--drivers/net/wireless/p54/txrx.c2
-rw-r--r--drivers/net/wireless/prism54/islpci_dev.c16
-rw-r--r--drivers/net/wireless/prism54/islpci_eth.c8
-rw-r--r--drivers/net/wireless/prism54/islpci_mgt.c8
-rw-r--r--drivers/net/wireless/prism54/oid_mgt.c2
-rw-r--r--drivers/net/wireless/ray_cs.c236
-rw-r--r--drivers/net/wireless/rndis_wlan.c370
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c9
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h108
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c467
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c124
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c76
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h29
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c4
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig24
-rw-r--r--drivers/net/wireless/wl12xx/Makefile6
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_boot.c3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_io.h20
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c8
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_sdio.c52
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h59
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c179
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h157
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c26
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h7
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c335
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h27
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h488
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c12
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h8
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c57
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.c87
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h135
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c1214
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c7
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c94
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_sdio.c291
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c315
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h96
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c1
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c133
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h9
-rw-r--r--drivers/net/wireless/wl3501_cs.c52
-rw-r--r--drivers/net/wireless/zd1201.c6
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c13
-rw-r--r--drivers/net/xilinx_emaclite.c1
-rw-r--r--drivers/net/yellowfin.c12
-rw-r--r--drivers/s390/net/qeth_l2_main.c5
-rw-r--r--drivers/s390/net/qeth_l3_main.c2
-rw-r--r--drivers/scsi/fcoe/fcoe.c18
-rw-r--r--drivers/scsi/iscsi_tcp.c4
-rw-r--r--drivers/ssb/driver_chipcommon.c1
-rw-r--r--drivers/staging/arlan/arlan-main.c9
-rw-r--r--drivers/staging/et131x/et131x_netdev.c6
-rw-r--r--drivers/staging/slicoss/slicoss.c6
-rw-r--r--drivers/staging/vt6655/device_main.c6
-rw-r--r--drivers/staging/vt6656/main_usb.c6
-rw-r--r--drivers/staging/wavelan/wavelan.c10
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c12
-rw-r--r--drivers/staging/winbond/wbusb.c6
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c12
-rw-r--r--drivers/vhost/net.c4
-rw-r--r--drivers/vhost/vhost.c11
-rw-r--r--include/linux/caif/caif_socket.h164
-rw-r--r--include/linux/caif/if_caif.h34
-rw-r--r--include/linux/can/dev.h1
-rw-r--r--include/linux/can/platform/mcp251x.h4
-rw-r--r--include/linux/ethtool.h116
-rw-r--r--include/linux/fib_rules.h8
-rw-r--r--include/linux/genetlink.h8
-rw-r--r--include/linux/ieee80211.h3
-rw-r--r--include/linux/if_arp.h1
-rw-r--r--include/linux/if_ether.h1
-rw-r--r--include/linux/if_link.h33
-rw-r--r--include/linux/if_packet.h1
-rw-r--r--include/linux/if_pppol2tp.h16
-rw-r--r--include/linux/if_pppox.h9
-rw-r--r--include/linux/l2tp.h163
-rw-r--r--include/linux/mmc/sdio.h2
-rw-r--r--include/linux/mod_devicetable.h26
-rw-r--r--include/linux/mroute.h20
-rw-r--r--include/linux/netdevice.h260
-rw-r--r--include/linux/nl80211.h58
-rw-r--r--include/linux/notifier.h9
-rw-r--r--include/linux/phy.h13
-rw-r--r--include/linux/ppp_channel.h3
-rw-r--r--include/linux/rculist.h13
-rw-r--r--include/linux/skbuff.h8
-rw-r--r--include/linux/snmp.h2
-rw-r--r--include/linux/socket.h5
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/tipc.h30
-rw-r--r--include/linux/tty.h4
-rw-r--r--include/linux/wireless.h4
-rw-r--r--include/net/caif/caif_dev.h90
-rw-r--r--include/net/caif/caif_device.h55
-rw-r--r--include/net/caif/caif_layer.h283
-rw-r--r--include/net/caif/cfcnfg.h133
-rw-r--r--include/net/caif/cfctrl.h138
-rw-r--r--include/net/caif/cffrml.h16
-rw-r--r--include/net/caif/cfmuxl.h22
-rw-r--r--include/net/caif/cfpkt.h274
-rw-r--r--include/net/caif/cfserl.h12
-rw-r--r--include/net/caif/cfsrvl.h34
-rw-r--r--include/net/cfg80211.h30
-rw-r--r--include/net/dn_fib.h4
-rw-r--r--include/net/dst.h15
-rw-r--r--include/net/fib_rules.h1
-rw-r--r--include/net/flow.h23
-rw-r--r--include/net/icmp.h11
-rw-r--r--include/net/if_inet6.h13
-rw-r--r--include/net/inet6_connection_sock.h2
-rw-r--r--include/net/inet_connection_sock.h5
-rw-r--r--include/net/inet_sock.h38
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ip6_fib.h29
-rw-r--r--include/net/ip6_route.h4
-rw-r--r--include/net/ipv6.h5
-rw-r--r--include/net/iw_handler.h2
-rw-r--r--include/net/mac80211.h70
-rw-r--r--include/net/netns/ipv4.h14
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/raw.h13
-rw-r--r--include/net/sch_generic.h1
-rw-r--r--include/net/sctp/sctp.h2
-rw-r--r--include/net/snmp.h29
-rw-r--r--include/net/sock.h57
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/net/xfrm.h19
-rw-r--r--net/802/garp.c4
-rw-r--r--net/8021q/vlan.c8
-rw-r--r--net/8021q/vlan_dev.c14
-rw-r--r--net/Kconfig8
-rw-r--r--net/Makefile2
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/atm/common.c12
-rw-r--r--net/atm/proc.c10
-rw-r--r--net/atm/signaling.c2
-rw-r--r--net/atm/svc.c62
-rw-r--r--net/ax25/af_ax25.c8
-rw-r--r--net/bluetooth/af_bluetooth.c6
-rw-r--r--net/bluetooth/bnep/core.c8
-rw-r--r--net/bluetooth/bnep/netdev.c20
-rw-r--r--net/bluetooth/cmtp/cmtp.h2
-rw-r--r--net/bluetooth/cmtp/core.c4
-rw-r--r--net/bluetooth/hidp/core.c10
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bluetooth/l2cap.c4
-rw-r--r--net/bluetooth/rfcomm/sock.c8
-rw-r--r--net/bluetooth/sco.c4
-rw-r--r--net/bridge/br_device.c43
-rw-r--r--net/bridge/br_if.c6
-rw-r--r--net/bridge/br_input.c6
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_notify.c4
-rw-r--r--net/bridge/br_private.h9
-rw-r--r--net/caif/Kconfig48
-rw-r--r--net/caif/Makefile26
-rw-r--r--net/caif/caif_config_util.c87
-rw-r--r--net/caif/caif_dev.c413
-rw-r--r--net/caif/caif_socket.c1391
-rw-r--r--net/caif/cfcnfg.c530
-rw-r--r--net/caif/cfctrl.c664
-rw-r--r--net/caif/cfdbgl.c40
-rw-r--r--net/caif/cfdgml.c108
-rw-r--r--net/caif/cffrml.c151
-rw-r--r--net/caif/cfmuxl.c246
-rw-r--r--net/caif/cfpkt_skbuff.c571
-rw-r--r--net/caif/cfrfml.c108
-rw-r--r--net/caif/cfserl.c192
-rw-r--r--net/caif/cfsrvl.c185
-rw-r--r--net/caif/cfutill.c115
-rw-r--r--net/caif/cfveil.c107
-rw-r--r--net/caif/cfvidl.c65
-rw-r--r--net/caif/chnl_net.c451
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/datagram.c6
-rw-r--r--net/core/dev.c1203
-rw-r--r--net/core/dev_addr_lists.c741
-rw-r--r--net/core/dev_mcast.c232
-rw-r--r--net/core/dst.c45
-rw-r--r--net/core/ethtool.c148
-rw-r--r--net/core/fib_rules.c29
-rw-r--r--net/core/flow.c405
-rw-r--r--net/core/net-sysfs.c318
-rw-r--r--net/core/pktgen.c58
-rw-r--r--net/core/rtnetlink.c59
-rw-r--r--net/core/skbuff.c8
-rw-r--r--net/core/sock.c24
-rw-r--r--net/core/stream.c16
-rw-r--r--net/core/sysctl_net_core.c68
-rw-r--r--net/dccp/ccids/ccid3.c2
-rw-r--r--net/dccp/dccp.h4
-rw-r--r--net/dccp/input.c2
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c7
-rw-r--r--net/dccp/output.c10
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dccp/timer.c4
-rw-r--r--net/decnet/af_decnet.c32
-rw-r--r--net/decnet/dn_dev.c12
-rw-r--r--net/decnet/dn_rules.c22
-rw-r--r--net/dsa/slave.c14
-rw-r--r--net/ethernet/eth.c2
-rw-r--r--net/ipv4/Kconfig22
-rw-r--r--net/ipv4/af_inet.c49
-rw-r--r--net/ipv4/devinet.c4
-rw-r--r--net/ipv4/fib_rules.c22
-rw-r--r--net/ipv4/icmp.c5
-rw-r--r--net/ipv4/igmp.c4
-rw-r--r--net/ipv4/inet_connection_sock.c4
-rw-r--r--net/ipv4/ip_output.c4
-rw-r--r--net/ipv4/ip_sockglue.c4
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/ipmr.c828
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c4
-rw-r--r--net/ipv4/proc.c1
-rw-r--r--net/ipv4/route.c29
-rw-r--r--net/ipv4/tcp.c17
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/ipv4/tcp_ipv4.c37
-rw-r--r--net/ipv4/tcp_minisocks.c1
-rw-r--r--net/ipv4/tcp_output.c9
-rw-r--r--net/ipv4/tcp_timer.c4
-rw-r--r--net/ipv4/udp.c15
-rw-r--r--net/ipv4/xfrm4_policy.c22
-rw-r--r--net/ipv6/addrconf.c805
-rw-r--r--net/ipv6/fib6_rules.c3
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/inet6_connection_sock.c4
-rw-r--r--net/ipv6/ip6_fib.c16
-rw-r--r--net/ipv6/ip6_output.c8
-rw-r--r--net/ipv6/ipv6_sockglue.c25
-rw-r--r--net/ipv6/mcast.c4
-rw-r--r--net/ipv6/netfilter/ip6t_hbh.c4
-rw-r--r--net/ipv6/proc.c1
-rw-r--r--net/ipv6/tcp_ipv6.c45
-rw-r--r--net/ipv6/udp.c4
-rw-r--r--net/ipv6/xfrm6_policy.c31
-rw-r--r--net/irda/af_irda.c14
-rw-r--r--net/irda/ircomm/ircomm_param.c2
-rw-r--r--net/iucv/af_iucv.c12
-rw-r--r--net/l2tp/Kconfig107
-rw-r--r--net/l2tp/Makefile12
-rw-r--r--net/l2tp/l2tp_core.c1693
-rw-r--r--net/l2tp/l2tp_core.h304
-rw-r--r--net/l2tp/l2tp_debugfs.c341
-rw-r--r--net/l2tp/l2tp_eth.c361
-rw-r--r--net/l2tp/l2tp_ip.c679
-rw-r--r--net/l2tp/l2tp_netlink.c840
-rw-r--r--net/l2tp/l2tp_ppp.c1837
-rw-r--r--net/llc/af_llc.c12
-rw-r--r--net/llc/llc_core.c6
-rw-r--r--net/mac80211/Kconfig17
-rw-r--r--net/mac80211/agg-rx.c72
-rw-r--r--net/mac80211/agg-tx.c14
-rw-r--r--net/mac80211/cfg.c34
-rw-r--r--net/mac80211/debugfs_netdev.c12
-rw-r--r--net/mac80211/debugfs_sta.c14
-rw-r--r--net/mac80211/driver-ops.h8
-rw-r--r--net/mac80211/driver-trace.h275
-rw-r--r--net/mac80211/ht.c3
-rw-r--r--net/mac80211/ibss.c16
-rw-r--r--net/mac80211/ieee80211_i.h29
-rw-r--r--net/mac80211/iface.c124
-rw-r--r--net/mac80211/main.c7
-rw-r--r--net/mac80211/mesh.c4
-rw-r--r--net/mac80211/mesh.h2
-rw-r--r--net/mac80211/mesh_hwmp.c4
-rw-r--r--net/mac80211/mesh_plink.c2
-rw-r--r--net/mac80211/mlme.c181
-rw-r--r--net/mac80211/pm.c2
-rw-r--r--net/mac80211/rc80211_minstrel.c2
-rw-r--r--net/mac80211/rc80211_minstrel.h11
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c41
-rw-r--r--net/mac80211/rx.c100
-rw-r--r--net/mac80211/scan.c73
-rw-r--r--net/mac80211/sta_info.c75
-rw-r--r--net/mac80211/sta_info.h12
-rw-r--r--net/mac80211/status.c14
-rw-r--r--net/mac80211/tx.c9
-rw-r--r--net/mac80211/util.c11
-rw-r--r--net/mac80211/work.c7
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/netlink/genetlink.c6
-rw-r--r--net/netrom/af_netrom.c8
-rw-r--r--net/packet/af_packet.c69
-rw-r--r--net/rds/af_rds.c11
-rw-r--r--net/rds/cong.c2
-rw-r--r--net/rds/ib_cm.c3
-rw-r--r--net/rds/ib_rdma.c5
-rw-r--r--net/rds/ib_recv.c4
-rw-r--r--net/rds/ib_send.c20
-rw-r--r--net/rds/iw_cm.c4
-rw-r--r--net/rds/iw_recv.c4
-rw-r--r--net/rds/iw_send.c3
-rw-r--r--net/rds/loop.c7
-rw-r--r--net/rds/rdma.c4
-rw-r--r--net/rds/rdma_transport.c5
-rw-r--r--net/rds/rds.h4
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rds/send.c40
-rw-r--r--net/rds/tcp_recv.c1
-rw-r--r--net/rds/tcp_send.c4
-rw-r--r--net/rds/threads.c2
-rw-r--r--net/rfkill/core.c53
-rw-r--r--net/rose/af_rose.c8
-rw-r--r--net/rxrpc/af_rxrpc.c4
-rw-r--r--net/sched/act_api.c45
-rw-r--r--net/sched/cls_api.c30
-rw-r--r--net/sched/cls_u32.c4
-rw-r--r--net/sched/sch_api.c112
-rw-r--r--net/sched/sch_generic.c15
-rw-r--r--net/sched/sch_sfq.c10
-rw-r--r--net/sctp/ipv6.c27
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/socket.c21
-rw-r--r--net/socket.c5
-rw-r--r--net/sunrpc/auth_gss/gss_spkm3_token.c2
-rw-r--r--net/sunrpc/bc_svc.c2
-rw-r--r--net/sunrpc/svcsock.c24
-rw-r--r--net/sunrpc/xprt.c2
-rw-r--r--net/tipc/bcast.c35
-rw-r--r--net/tipc/core.c2
-rw-r--r--net/tipc/link.c19
-rw-r--r--net/tipc/net.c4
-rw-r--r--net/tipc/node.c2
-rw-r--r--net/tipc/socket.c26
-rw-r--r--net/tipc/subscr.c15
-rw-r--r--net/unix/af_unix.c10
-rw-r--r--net/wimax/op-reset.c2
-rw-r--r--net/wimax/op-state-get.c2
-rw-r--r--net/wireless/core.h15
-rw-r--r--net/wireless/mlme.c52
-rw-r--r--net/wireless/nl80211.c153
-rw-r--r--net/wireless/nl80211.h6
-rw-r--r--net/wireless/reg.c6
-rw-r--r--net/wireless/sme.c15
-rw-r--r--net/wireless/util.c24
-rw-r--r--net/wireless/wext-core.c134
-rw-r--r--net/x25/af_x25.c8
-rw-r--r--net/xfrm/xfrm_hash.h3
-rw-r--r--net/xfrm/xfrm_policy.c847
-rw-r--r--net/xfrm/xfrm_state.c5
-rw-r--r--net/xfrm/xfrm_user.c10
-rw-r--r--scripts/mod/file2alias.c26
835 files changed, 48296 insertions, 20551 deletions
diff --git a/Documentation/ABI/obsolete/sysfs-class-rfkill b/Documentation/ABI/obsolete/sysfs-class-rfkill
new file mode 100644
index 000000000000..4201d5b05515
--- /dev/null
+++ b/Documentation/ABI/obsolete/sysfs-class-rfkill
@@ -0,0 +1,29 @@
+rfkill - radio frequency (RF) connector kill switch support
+
+For details on this subsystem, see Documentation/rfkill.txt.
+
+What:		/sys/class/rfkill/rfkill[0-9]+/state
+Date:		09-Jul-2007
+KernelVersion:	v2.6.22
+Contact:	linux-wireless@vger.kernel.org
+Description:	Current state of the transmitter.
+		This file is deprecated and scheduled to be removed in 2014,
+		because it is not possible to express the 'soft and hard block'
+		states of the rfkill driver through it.
+Values:		A numeric value.
+		0: RFKILL_STATE_SOFT_BLOCKED
+			transmitter is turned off by software
+		1: RFKILL_STATE_UNBLOCKED
+			transmitter is (potentially) active
+		2: RFKILL_STATE_HARD_BLOCKED
+			transmitter is forced off by something outside of
+			the driver's control.
+
+What:		/sys/class/rfkill/rfkill[0-9]+/claim
+Date:		09-Jul-2007
+KernelVersion:	v2.6.22
+Contact:	linux-wireless@vger.kernel.org
+Description:	This file is deprecated because there no longer is a way to
+		claim just control over a single rfkill instance.
+		This file is scheduled to be removed in 2012.
+Values:		0: Kernel handles events
diff --git a/Documentation/ABI/stable/sysfs-class-rfkill b/Documentation/ABI/stable/sysfs-class-rfkill
new file mode 100644
index 000000000000..097f522c33bb
--- /dev/null
+++ b/Documentation/ABI/stable/sysfs-class-rfkill
@@ -0,0 +1,67 @@
+rfkill - radio frequency (RF) connector kill switch support
+
+For details on this subsystem, see Documentation/rfkill.txt.
+
+For the deprecated /sys/class/rfkill/*/state and
+/sys/class/rfkill/*/claim knobs of this interface, see
+Documentation/ABI/obsolete/sysfs-class-rfkill.
+
+What:		/sys/class/rfkill
+Date:		09-Jul-2007
+KernelVersion:	v2.6.22
+Contact:	linux-wireless@vger.kernel.org
+Description:	The rfkill class subsystem folder.
+		Each registered rfkill driver is represented by an rfkillX
+		subfolder (X being an integer >= 0).
+
+
+What:		/sys/class/rfkill/rfkill[0-9]+/name
+Date:		09-Jul-2007
+KernelVersion:	v2.6.22
+Contact:	linux-wireless@vger.kernel.org
+Description:	Name assigned by driver to this key (interface or driver name).
+Values:		arbitrary string.
+
+
+What:		/sys/class/rfkill/rfkill[0-9]+/type
+Date:		09-Jul-2007
+KernelVersion:	v2.6.22
+Contact:	linux-wireless@vger.kernel.org
+Description:	Driver type string ("wlan", "bluetooth", etc).
+Values:		See include/linux/rfkill.h.
+
+
+What:		/sys/class/rfkill/rfkill[0-9]+/persistent
+Date:		09-Jul-2007
+KernelVersion:	v2.6.22
+Contact:	linux-wireless@vger.kernel.org
+Description:	Whether the soft blocked state is initialised from non-volatile
+		storage at startup.
+Values:		A numeric value.
+		0: false
+		1: true
+
+
+What:		/sys/class/rfkill/rfkill[0-9]+/hard
+Date:		12-March-2010
+KernelVersion:	v2.6.34
+Contact:	linux-wireless@vger.kernel.org
+Description:	Current hardblock state. This file is read only.
+Values:		A numeric value.
+		0: inactive
+			The transmitter is (potentially) active.
+		1: active
+			The transmitter is forced off by something outside of
+			the driver's control.
+
+
+What:		/sys/class/rfkill/rfkill[0-9]+/soft
+Date:		12-March-2010
+KernelVersion:	v2.6.34
+Contact:	linux-wireless@vger.kernel.org
+Description:	Current softblock state. This file is read and write.
+Values:		A numeric value.
+		0: inactive
+			The transmitter is (potentially) active.
+		1: active
+			The transmitter is turned off by software.
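
As a quick illustration of the stable knobs above, the sketch below reads the
"hard" state and then soft-blocks the transmitter from user space. It is a
minimal example, assuming an rfkill0 instance exists and the caller may write
the sysfs file; it is not part of the ABI description itself.

	/* Read "hard", then set "soft" for rfkill0 via the stable sysfs ABI.
	 * Minimal sketch: assumes /sys/class/rfkill/rfkill0 exists. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		char buf[4] = { 0 };
		int fd = open("/sys/class/rfkill/rfkill0/hard", O_RDONLY);

		if (fd < 0 || read(fd, buf, sizeof(buf) - 1) < 0)
			return 1;
		close(fd);
		printf("hard block: %s", buf);		/* "0\n" or "1\n" */

		fd = open("/sys/class/rfkill/rfkill0/soft", O_WRONLY);
		if (fd < 0 || write(fd, "1", 1) != 1)	/* "1" = soft block on */
			return 1;
		close(fd);
		return 0;
	}
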
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ed511af0f79a..116a13c4f13f 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -543,6 +543,24 @@ Who: Eric Miao <eric.y.miao@gmail.com>
 
 ----------------------------
 
+What:	sysfs-class-rfkill state file
+When:	Feb 2014
+Files:	net/rfkill/core.c
+Why:	Documented as obsolete since Feb 2010. This file is limited to 3
+	states while the rfkill drivers can have 4 states.
+Who:	anybody or Florian Mickler <florian@mickler.org>
+
+----------------------------
+
+What:	sysfs-class-rfkill claim file
+When:	Feb 2012
+Files:	net/rfkill/core.c
+Why:	It has not been possible to claim an rfkill driver since 2007.
+	Documented as obsolete since Feb 2010.
+Who:	anybody or Florian Mickler <florian@mickler.org>
+
+----------------------------
+
 What:	capifs
 When:	February 2011
 Files:	drivers/isdn/capi/capifs.*
@@ -589,3 +607,24 @@ Why: Useful in 2003, implementation is a hack.
 	Generally invoked by accident today.
 	Seen as doing more harm than good.
 Who:	Len Brown <len.brown@intel.com>
+
+----------------------------
+
+What:	iwlwifi 50XX module parameters
+When:	2.6.40
+Why:	The "..50" module parameters were used to configure 5000-series and
+	later devices; a different set of module parameters with the same
+	functionality is also available for the 4965. Consolidate both sets
+	into a single place in drivers/net/wireless/iwlwifi/iwl-agn.c
+
+Who:	Wey-Yi Guy <wey-yi.w.guy@intel.com>
+
+----------------------------
+
+What:	iwl4965 alias support
+When:	2.6.40
+Why:	Internal alias support has been present in module-init-tools for some
+	time; the MODULE_ALIAS("iwl4965") boilerplate aliases can be removed
+	with no impact.
+
+Who:	Wey-Yi Guy <wey-yi.w.guy@intel.com>
diff --git a/Documentation/networking/caif/Linux-CAIF.txt b/Documentation/networking/caif/Linux-CAIF.txt
new file mode 100644
index 000000000000..7fe7a9a33a4f
--- /dev/null
+++ b/Documentation/networking/caif/Linux-CAIF.txt
@@ -0,0 +1,212 @@
+Linux CAIF
+===========
+Copyright (C) ST-Ericsson AB 2010
+Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+License terms: GNU General Public License (GPL) version 2
+
+
+Introduction
+------------
+CAIF is a MUX protocol used by ST-Ericsson cellular modems for
+communication between modem and host. Host processes can open virtual AT
+channels, initiate GPRS data connections, and open Video and Utility
+channels. The Utility channels are general purpose pipes between modem
+and host.
+
+ST-Ericsson modems support a number of transports between modem
+and host. Currently, UART and Loopback are available for Linux.
+
+
+Architecture
+------------
+The implementation of CAIF is divided into:
+* CAIF Socket Layer, Kernel API, and Net Device.
+* CAIF Core Protocol Implementation
+* CAIF Link Layer, implemented as NET devices.
+
+
+      RTNL
+       !
+       !       +------+   +------+   +------+
+       !      +------+!  +------+!  +------+!
+       !      ! Sock !!  !Kernel!!  ! Net  !!
+       !      ! API  !+  ! API  !+  ! Dev  !+   <- CAIF Client APIs
+       !      +------+   +------+   +------+
+       !          !          !          !
+       !          +----------!----------+
+       !                     !
+       !                 +------+   <- CAIF Protocol Implementation
+       +-------------->  ! CAIF !
+                         ! Core !
+                         +------+
+                    +--------!--------+
+                    !                 !
+                +------+           +-----+
+                !      !           ! TTY !   <- Link Layer (Net Devices)
+                +------+           +-----+
+
+
+Using the Kernel API
+--------------------
+The Kernel API is used for accessing CAIF channels from the
+kernel. The user of the API has to implement two callbacks, one for
+receive and one for control. The receive callback delivers a CAIF
+packet as an SKB. The control callback notifies completion of channel
+initialization as well as flow-on/flow-off events.
+
+  struct caif_device caif_dev = {
+    .caif_config = {
+      .name = "MYDEV",
+      .type = CAIF_CHTY_AT
+    },
+    .receive_cb = my_receive,
+    .control_cb = my_control,
+  };
+  caif_add_device(&caif_dev);
+  caif_transmit(&caif_dev, skb);
+
+See caif_kernel.h for details about the CAIF kernel API.
+
+
+I M P L E M E N T A T I O N
+===========================
+
+CAIF Core Protocol Layer
+========================
+
+The CAIF Core layer implements the CAIF protocol as defined by ST-Ericsson.
+It implements the CAIF protocol stack in a layered approach, where
+each layer described in the specification is implemented as a separate layer.
+The architecture is inspired by the design patterns "Protocol Layer" and
+"Protocol Packet".
+
+== CAIF structure ==
+The Core CAIF implementation contains:
+  - A simple implementation of CAIF.
+  - A layered architecture (a la Streams), where each layer in the CAIF
+    specification is implemented in a separate c-file.
+  - Clients must implement a PHY layer to access physical HW,
+    with receive and transmit functions.
+  - Clients must call the configuration function to add a PHY layer.
+  - Clients must implement a CAIF layer to consume/produce
+    CAIF payload, with receive and transmit functions.
+  - Clients must call the configuration function to add and connect the
+    Client layer.
+  - When receiving / transmitting CAIF Packets (cfpkt), ownership is passed
+    to the called function (except for framing layers' receive functions,
+    or if a transmit function returns an error, in which case the caller
+    must free the packet).
+
+Layered Architecture
+--------------------
+The CAIF protocol can be divided into two parts: support functions and the
+protocol implementation. The support functions include:
+
+  - CFPKT CAIF Packet. Implementation of the CAIF Protocol Packet. The
+    CAIF Packet has functions for creating, destroying and adding content,
+    and for adding/extracting headers and trailers to protocol packets.
+
+  - CFLST CAIF list implementation.
+
+  - CFGLUE CAIF Glue. Contains OS specifics, such as memory
+    allocation, endianness, etc.
+
+The CAIF Protocol implementation contains:
+
+  - CFCNFG CAIF Configuration layer. Configures the CAIF Protocol
+    Stack and provides a Client interface for adding Link-Layer and
+    Driver interfaces on top of the CAIF Stack.
+
+  - CFCTRL CAIF Control layer. Encodes and decodes control messages
+    such as enumeration and channel setup. Also matches request and
+    response messages.
+
+  - CFSERVL General CAIF Service Layer functionality; handles flow
+    control and remote shutdown requests.
+
+  - CFVEI CAIF VEI layer. Handles CAIF AT Channels on VEI (Virtual
+    External Interface). This layer encodes/decodes VEI frames.
+
+  - CFDGML CAIF Datagram layer. Handles the CAIF Datagram layer (IP
+    traffic), encodes/decodes Datagram frames.
+
+  - CFMUX CAIF Mux layer. Handles multiplexing between multiple
+    physical bearers and multiple channels such as VEI, Datagram, etc.
+    The MUX keeps track of the existing CAIF Channels and
+    Physical Instances and selects the appropriate instance based
+    on Channel-ID and Physical-ID.
+
+  - CFFRML CAIF Framing layer. Handles framing, i.e. frame length
+    and frame checksum.
+
+  - CFSERL CAIF Serial layer. Handles concatenation/splitting of frames
+    into CAIF frames of the correct length.
+
+
+                 +---------+
+                 | Config  |
+                 | CFCNFG  |
+                 +---------+
+                      !
+   +---------+   +---------+   +---------+
+   |   AT    |   | Control |   | Datagram|
+   | CFVEIL  |   | CFCTRL  |   | CFDGML  |
+   +---------+   +---------+   +---------+
+        \____________!____________/
+                     !
+                +---------+
+                |   MUX   |
+                |         |
+                +---------+
+                 ____!_____
+                /          \
+       +---------+      +---------+
+       | CFFRML  |      | CFFRML  |
+       | Framing |      | Framing |
+       +---------+      +---------+
+            !                !
+       +---------+      +---------+
+       |         |      | Serial  |
+       |         |      | CFSERL  |
+       +---------+      +---------+
+
+
+In this layered approach the following "rules" apply:
+  - All layers embed the same structure, "struct cflayer".
+  - A layer does not depend on any other layer's private data.
+  - Layers are stacked by setting the pointers
+      layer->up, layer->dn
+  - In order to send data upwards, each layer should do
+      layer->up->receive(layer->up, packet);
+  - In order to send data downwards, each layer should do
+      layer->dn->transmit(layer->dn, packet);
+
+
+Linux Driver Implementation
+===========================
+
+The Linux GPRS Net Device and the CAIF socket are implemented on top of
+the CAIF Core protocol. The Net Device and the CAIF socket each have an
+instance of 'struct cflayer', just like the CAIF Core protocol stack.
+They implement the 'receive()' function defined by 'struct cflayer',
+just like the rest of the CAIF stack. In this way, packet transmission
+and reception are handled exactly as in the other layers: the
+'dn->transmit()' function is called in order to transmit data.
+
+The layer on top of the CAIF Core implementation is
+sometimes referred to as the "Client layer".
+
+
+Configuration of the Link Layer
+-------------------------------
+The Link Layer is implemented as Linux net devices (struct net_device).
+Payload handling and registration is done using standard Linux mechanisms.
+
+The CAIF Protocol relies on a lossless link layer and does not implement
+retransmission. This implies that packet drops must not happen.
+Therefore a flow-control mechanism is implemented where the physical
+interface can initiate flow stop for all CAIF Channels.
new file mode 100644
index 000000000000..757ccfaa1385
--- /dev/null
+++ b/Documentation/networking/caif/README
@@ -0,0 +1,109 @@
+Copyright (C) ST-Ericsson AB 2010
+Author: Sjur Brendeland / sjur.brandeland@stericsson.com
+License terms: GNU General Public License (GPL) version 2
+---------------------------------------------------------
+
+=== Start ===
+If you have compiled CAIF as modules, do:
+
+$ modprobe crc_ccitt
+$ modprobe caif
+$ modprobe caif_socket
+$ modprobe chnl_net
+
+
+=== Preparing the setup with a STE modem ===
+
+If you are working on integration of CAIF, you should make sure
+that the kernel is built with module support.
+
+There are some things that need to be tweaked to get the host TTY correctly
+set up to talk to the modem.
+Since the CAIF stack runs in the kernel and we want to use the existing
+TTY, we install our physical serial driver as a line discipline above
+the TTY device.
+
+To achieve this we need to install the N_CAIF ldisc from user space.
+The benefit is that we can hook up to any TTY.
+
+Use of the start-of-frame extension (STX) must also be enabled via the
+module parameter "ser_use_stx".
+
+The frame checksum is normally used on UART; it too is controlled by a
+module parameter, "ser_use_fcs".
+
+$ modprobe caif_serial ser_ttyname=/dev/ttyS0 ser_use_stx=yes
+$ ifconfig caif_ttyS0 up
+
+PLEASE NOTE:	There is a limitation in the Android shell.
+		It only accepts one argument to insmod/modprobe!
+
+=== Troubleshooting ===
+
+There are debugfs entries provided for serial communication under
+/sys/kernel/debug/caif_serial/<tty-name>/
+
+* ser_state: Prints the bit-mask status where
+  - 0x02 means SENDING, this is a transient state.
+  - 0x10 means FLOW_OFF_SENT, i.e. the previous frame has not been sent
+    and is blocking further send operations. Flow OFF has been propagated
+    to all CAIF Channels using this TTY.
+
+* tty_status: Prints the bit-mask tty status information
+  - 0x01 - tty->warned is on.
+  - 0x02 - tty->low_latency is on.
+  - 0x04 - tty->packed is on.
+  - 0x08 - tty->flow_stopped is on.
+  - 0x10 - tty->hw_stopped is on.
+  - 0x20 - tty->stopped is on.
+
+* last_tx_msg: Binary blob holding the last transmitted frame.
+  It can be printed with
+	$ od --format=x1 /sys/kernel/debug/caif_serial/<tty>/last_tx_msg
+  The first two tx messages sent look like this. Note: the initial
+  byte 02 is the start-of-frame extension (STX) used for re-syncing
+  upon errors.
+
+  - Enumeration:
+        0000000  02 05 00 00 03 01 d2 02
+                 |  |     |  |  |  |
+                 STX(1)   |  |  |  |
+                    Length(2)|  |  |
+                       Control Channel(1)
+                          Command:Enumeration(1)
+                             Link-ID(1)
+                                Checksum(2)
+  - Channel Setup:
+        0000000  02 07 00 00 00 21 a1 00 48 df
+                 |  |     |  |  |  |  |  |
+                 STX(1)   |  |  |  |  |  |
+                    Length(2)|  |  |  |  |
+                       Control Channel(1)
+                          Command:Channel Setup(1)
+                             Channel Type(1)
+                                Priority and Link-ID(1)
+                                   Endpoint(1)
+                                      Checksum(2)
+
+* last_rx_msg: Prints the last received frame.
+  The RX messages for LinkSetup look almost identical, but they have the
+  bit 0x20 set in the command byte, and Channel Setup has one added byte
+  before the Checksum containing the Channel ID.
+  NOTE: Several CAIF messages might be concatenated. The maximum debug
+  buffer size is 128 bytes.
+
+=== Error Scenarios ===
+- last_tx_msg contains a channel setup message and last_rx_msg is empty ->
+  The host seems to be able to send over the UART; at least the CAIF ldisc
+  gets notified that sending is completed.
+
+- last_tx_msg contains an enumeration message and last_rx_msg is empty ->
+  The host is not able to send the message from the UART; the tty has not
+  been able to complete the transmit operation.
+
+- If /sys/kernel/debug/caif_serial/<tty>/tty_status is non-zero there
+  might be problems transmitting over the UART.
+  E.g. if host and modem wiring is not correct, you will typically see
+  tty_status = 0x10 (hw_stopped) and ser_state = 0x10 (FLOW_OFF_SENT).
+  You will probably see the enumeration message in last_tx_msg
+  and an empty last_rx_msg.
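
As a small aid when reading these debugfs values, the sketch below decodes a
tty_status bitmask according to the table above. It is a hypothetical
user-space helper, not part of the caif_serial driver.

	/* Decode the caif_serial debugfs tty_status bitmask (table above). */
	#include <stdio.h>
	#include <stdlib.h>

	static const struct { unsigned int bit; const char *name; } tty_bits[] = {
		{ 0x01, "tty->warned" },	{ 0x02, "tty->low_latency" },
		{ 0x04, "tty->packed" },	{ 0x08, "tty->flow_stopped" },
		{ 0x10, "tty->hw_stopped" },	{ 0x20, "tty->stopped" },
	};

	int main(int argc, char **argv)
	{
		unsigned int status = argc > 1 ? strtoul(argv[1], NULL, 0) : 0;
		unsigned int i;

		for (i = 0; i < sizeof(tty_bits) / sizeof(tty_bits[0]); i++)
			if (status & tty_bits[i].bit)
				printf("%s is on\n", tty_bits[i].name);
		return 0;
	}

For example, running it as "./decode_tty_status 0x10" prints
"tty->hw_stopped is on", matching the wiring-problem scenario above.
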
diff --git a/Documentation/networking/l2tp.txt b/Documentation/networking/l2tp.txt
index 63214b280e00..e7bf3979facb 100644
--- a/Documentation/networking/l2tp.txt
+++ b/Documentation/networking/l2tp.txt
@@ -1,44 +1,95 @@
-This brief document describes how to use the kernel's PPPoL2TP driver
-to provide L2TP functionality. L2TP is a protocol that tunnels one or
-more PPP sessions over a UDP tunnel. It is commonly used for VPNs
+This document describes how to use the kernel's L2TP drivers to
+provide L2TP functionality. L2TP is a protocol that tunnels one or
+more sessions over an IP tunnel. It is commonly used for VPNs
 (L2TP/IPSec) and by ISPs to tunnel subscriber PPP sessions over an IP
-network infrastructure.
+network infrastructure. With L2TPv3, it is also useful as a Layer-2
+tunneling infrastructure.
+
+Features
+========
+
+L2TPv2 (PPP over L2TP (UDP tunnels)).
+L2TPv3 ethernet pseudowires.
+L2TPv3 PPP pseudowires.
+L2TPv3 IP encapsulation.
+Netlink sockets for L2TPv3 configuration management.
+
+History
+=======
+
+The original pppol2tp driver was introduced in 2.6.23 and provided
+L2TPv2 functionality (RFC 2661). L2TPv2 is used to tunnel one or more
+PPP sessions over a UDP tunnel.
+
+L2TPv3 (RFC 3931) changes the protocol to allow different frame types
+to be passed over an L2TP tunnel by moving the PPP-specific parts of
+the protocol out of the core L2TP packet headers. Each frame type is
+known as a pseudowire type. Ethernet, PPP, HDLC, Frame Relay and ATM
+pseudowires for L2TP are defined in separate RFC standards. Another
+change for L2TPv3 is that it can be carried directly over IP with no
+UDP header (UDP is optional). It is also possible to create static
+unmanaged L2TPv3 tunnels manually, without a control protocol
+(userspace daemon) to manage them.
+
+To support L2TPv3, the original pppol2tp driver was split up to
+separate the L2TP and PPP functionality. Existing L2TPv2 userspace
+apps should be unaffected, as the original pppol2tp sockets API is
+retained. L2TPv3, however, uses netlink to manage L2TPv3 tunnels and
+sessions.
 
 Design
 ======
 
-The PPPoL2TP driver, drivers/net/pppol2tp.c, provides a mechanism by
-which PPP frames carried through an L2TP session are passed through
-the kernel's PPP subsystem. The standard PPP daemon, pppd, handles all
-PPP interaction with the peer. PPP network interfaces are created for
-each local PPP endpoint.
+The L2TP protocol separates control and data frames. The L2TP kernel
+drivers handle only L2TP data frames; control frames are always
+handled by userspace. L2TP control frames carry messages between L2TP
+clients/servers and are used to set up / tear down tunnels and
+sessions. An L2TP client or server is implemented in userspace.
 
-The L2TP protocol http://www.faqs.org/rfcs/rfc2661.html defines L2TP
-control and data frames. L2TP control frames carry messages between
-L2TP clients/servers and are used to setup / teardown tunnels and
-sessions. An L2TP client or server is implemented in userspace and
-will use a regular UDP socket per tunnel. L2TP data frames carry PPP
-frames, which may be PPP control or PPP data. The kernel's PPP
+Each L2TP tunnel is implemented using a UDP or L2TPIP socket; L2TPIP
+provides L2TPv3 IP encapsulation (no UDP) and is implemented using a
+new l2tpip socket family. The tunnel socket is typically created by
+userspace, though for unmanaged L2TPv3 tunnels the socket can also be
+created by the kernel. Each L2TP session (pseudowire) gets a network
+interface instance. In the case of PPP, these interfaces are created
+indirectly by pppd using a pppol2tp socket. In the case of ethernet,
+the netdevice is created upon a netlink request to create an L2TPv3
+ethernet pseudowire.
+
+For PPP, the PPPoL2TP driver, net/l2tp/l2tp_ppp.c, provides a
+mechanism by which PPP frames carried through an L2TP session are
+passed through the kernel's PPP subsystem. The standard PPP daemon,
+pppd, handles all PPP interaction with the peer. PPP network
+interfaces are created for each local PPP endpoint. The kernel's PPP
 subsystem arranges for PPP control frames to be delivered to pppd,
 while data frames are forwarded as usual.
 
+For ethernet, the L2TPETH driver, net/l2tp/l2tp_eth.c, implements a
+netdevice driver, managing virtual ethernet devices, one per
+pseudowire. These interfaces can be managed using standard Linux tools
+such as "ip" and "ifconfig". If only IP frames are passed over the
+tunnel, the interface can be given the IP addresses of itself and its
+peer. If non-IP frames are to be passed over the tunnel, the interface
+can be added to a bridge using brctl. All L2TP datapath protocol
+functions are handled by the L2TP core driver.
+
 Each tunnel and session within a tunnel is assigned a unique tunnel_id
 and session_id. These ids are carried in the L2TP header of every
-control and data packet. The pppol2tp driver uses them to lookup
-internal tunnel and/or session contexts. Zero tunnel / session ids are
-treated specially - zero ids are never assigned to tunnels or sessions
-in the network. In the driver, the tunnel context keeps a pointer to
-the tunnel UDP socket. The session context keeps a pointer to the
-PPPoL2TP socket, as well as other data that lets the driver interface
-to the kernel PPP subsystem.
-
-Note that the pppol2tp kernel driver handles only L2TP data frames;
-L2TP control frames are simply passed up to userspace in the UDP
-tunnel socket. The kernel handles all datapath aspects of the
-protocol, including data packet resequencing (if enabled).
+control and data packet. (Actually, in L2TPv3, the tunnel_id isn't
+present in data frames - it is inferred from the IP connection on
+which the packet was received.) The L2TP driver uses the ids to look up
+internal tunnel and/or session contexts to determine how to handle the
+packet. Zero tunnel / session ids are treated specially - zero ids are
+never assigned to tunnels or sessions in the network. In the driver,
+the tunnel context keeps a reference to the tunnel UDP or L2TPIP
+socket. The session context holds data that lets the driver interface
+to the kernel's network frame type subsystems, i.e. PPP and ethernet.
+
+Userspace Programming
+=====================
 
-There are a number of requirements on the userspace L2TP daemon in
-order to use the pppol2tp driver.
+For L2TPv2, there are a number of requirements on the userspace L2TP
+daemon in order to use the pppol2tp driver.
 
 1. Use a UDP socket per tunnel.
 
@@ -86,6 +137,35 @@ In addition to the standard PPP ioctls, a PPPIOCGL2TPSTATS is provided
86to retrieve tunnel and session statistics from the kernel using the 137to retrieve tunnel and session statistics from the kernel using the
87PPPoX socket of the appropriate tunnel or session. 138PPPoX socket of the appropriate tunnel or session.
88 139
140For L2TPv3, userspace must use the netlink API defined in
141include/linux/l2tp.h to manage tunnel and session contexts. The
142general procedure to create a new L2TP tunnel with one session is:-
143
1441. Open a GENL socket using L2TP_GENL_NAME for configuring the kernel
145 using netlink.
146
1472. Create a UDP or L2TPIP socket for the tunnel.
148
1493. Create a new L2TP tunnel using an L2TP_CMD_TUNNEL_CREATE
150 request. Set attributes according to desired tunnel parameters,
151 referencing the UDP or L2TPIP socket created in the previous step.
152
1534. Create a new L2TP session in the tunnel using an
154 L2TP_CMD_SESSION_CREATE request.
155
156The tunnel and all of its sessions are closed when the tunnel socket
157is closed. The netlink API may also be used to delete sessions and
158tunnels. Configuration and status info may be set or read using netlink.
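
As an illustration, the four steps above map onto generic netlink
requests roughly as follows. This is a minimal editorial sketch, not
part of the kernel sources: it assumes libnl-3 helpers, a connected
UDP socket tunnel_fd created beforehand (step 2), and hard-coded ids;
error handling is omitted.

#include <netinet/in.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/l2tp.h>

static int l2tp_create(int tunnel_fd)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	genl_connect(sk);
	/* Step 1: resolve the l2tp generic netlink family */
	family = genl_ctrl_resolve(sk, L2TP_GENL_NAME);

	/* Step 3: create the tunnel, referencing the UDP socket */
	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_REQUEST | NLM_F_ACK, L2TP_CMD_TUNNEL_CREATE,
		    L2TP_GENL_VERSION);
	nla_put_u8(msg, L2TP_ATTR_PROTO_VERSION, 3);
	nla_put_u16(msg, L2TP_ATTR_ENCAP_TYPE, L2TP_ENCAPTYPE_UDP);
	nla_put_u32(msg, L2TP_ATTR_CONN_ID, 1);
	nla_put_u32(msg, L2TP_ATTR_PEER_CONN_ID, 1);
	nla_put_u32(msg, L2TP_ATTR_FD, tunnel_fd);
	nl_send_auto_complete(sk, msg);
	nl_wait_for_ack(sk);
	nlmsg_free(msg);

	/* Step 4: create one session (ethernet pseudowire) in the tunnel */
	msg = nlmsg_alloc();
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_REQUEST | NLM_F_ACK, L2TP_CMD_SESSION_CREATE,
		    L2TP_GENL_VERSION);
	nla_put_u32(msg, L2TP_ATTR_CONN_ID, 1);
	nla_put_u32(msg, L2TP_ATTR_SESSION_ID, 1);
	nla_put_u32(msg, L2TP_ATTR_PEER_SESSION_ID, 1);
	nla_put_u16(msg, L2TP_ATTR_PW_TYPE, L2TP_PWTYPE_ETH);
	nl_send_auto_complete(sk, msg);
	nl_wait_for_ack(sk);
	nlmsg_free(msg);

	nl_socket_free(sk);
	return 0;
}

Deletion follows the same pattern with L2TP_CMD_SESSION_DELETE and
L2TP_CMD_TUNNEL_DELETE.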
159
160The L2TP driver also supports static (unmanaged) L2TPv3 tunnels, where
161there is no L2TP control message exchange with the peer to set up the
162tunnel; instead, the tunnel is configured manually at each end. There
163is no need for an L2TP userspace application in this
164case -- the tunnel socket is created by the kernel and configured
165using parameters sent in the L2TP_CMD_TUNNEL_CREATE netlink
166request. The "ip" utility of iproute2 has commands for managing static
167L2TPv3 tunnels; do "ip l2tp help" for more information.
168
89Debugging 169Debugging
90========= 170=========
91 171
@@ -102,6 +182,69 @@ PPPOL2TP_MSG_CONTROL userspace - kernel interface
102PPPOL2TP_MSG_SEQ sequence numbers handling 182PPPOL2TP_MSG_SEQ sequence numbers handling
103PPPOL2TP_MSG_DATA data packets 183PPPOL2TP_MSG_DATA data packets
104 184
185If enabled, files under an l2tp debugfs directory can be used to dump
186kernel state about L2TP tunnels and sessions. To access it, the
187debugfs filesystem must first be mounted.
188
189# mount -t debugfs debugfs /debug
190
191Files under the l2tp directory can then be accessed.
192
193# cat /debug/l2tp/tunnels
194
195The debugfs files should not be used by applications to obtain L2TP
196state information because the file format is subject to change. They
197are implemented to provide extra debug information to help diagnose
198problems. Users should use the netlink API.
199
200/proc/net/pppol2tp is also provided for backwards compatibility with
201the original pppol2tp driver. It lists information about L2TPv2
202tunnels and sessions only. Its use is discouraged.
203
204Unmanaged L2TPv3 Tunnels
205========================
206
207Some commercial L2TP products support unmanaged L2TPv3 ethernet
208tunnels, where there is no L2TP control protocol; tunnels are
209configured manually at each side. New commands are available in
210iproute2's ip utility to support this.
211
212To create an L2TPv3 ethernet pseudowire between local host 192.168.1.1
213and peer 192.168.1.2, using IP addresses 10.5.1.1 and 10.5.1.2 for the
214pseudowire interface at each end:-
215
216# modprobe l2tp_eth
217# modprobe l2tp_netlink
218
219# ip l2tp add tunnel tunnel_id 1 peer_tunnel_id 1 udp_sport 5000 \
220 udp_dport 5000 encap udp local 192.168.1.1 remote 192.168.1.2
221# ip l2tp add session tunnel_id 1 session_id 1 peer_session_id 1
222# ifconfig -a
223# ip addr add 10.5.1.2/32 peer 10.5.1.1/32 dev l2tpeth0
224# ifconfig l2tpeth0 up
225
226Choose the tunnel endpoint IP addresses to be the address of a local
227IP interface and that of the remote system. The IP addresses given to
228the l2tpeth0 interface can be anything suitable.
229
230Repeat the above at the peer, with ports, tunnel/session ids and IP
231addresses reversed. The tunnel and session IDs can be any non-zero
23232-bit number, but each id must be mirrored at the peer, as in the example after the table below.
233
234Host 1 Host 2
235udp_sport=5000 udp_sport=5001
236udp_dport=5001 udp_dport=5000
237tunnel_id=42 tunnel_id=45
238peer_tunnel_id=45 peer_tunnel_id=42
239session_id=128 session_id=5196755
240peer_session_id=5196755 peer_session_id=128
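
For example, with the values in this table and the host addresses used
earlier, Host 2's configuration would be (a sketch mirroring the Host 1
commands above):

# ip l2tp add tunnel tunnel_id 45 peer_tunnel_id 42 udp_sport 5001 \
  udp_dport 5000 encap udp local 192.168.1.2 remote 192.168.1.1
# ip l2tp add session tunnel_id 45 session_id 5196755 peer_session_id 128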
241
242When done at both ends of the tunnel, it should be possible to send
243data over the network, e.g.
244
245# ping 10.5.1.1
246
247
105Sample Userspace Code 248Sample Userspace Code
106===================== 249=====================
107 250
@@ -158,12 +301,48 @@ Sample Userspace Code
158 } 301 }
159 return 0; 302 return 0;
160 303
304Internal Implementation
305=======================
306
307The driver keeps a struct l2tp_tunnel context per L2TP tunnel and a
308struct l2tp_session context for each session. The l2tp_tunnel is
309always associated with a UDP or L2TP/IP socket and keeps a list of
310sessions in the tunnel. The l2tp_session context keeps kernel state
311about the session. It has a private data area used for data specific
312to the session type. With L2TPv2, the session always carries PPP
313traffic. With L2TPv3, the session can also carry ethernet frames
314(ethernet pseudowire) or other data types such as ATM, HDLC or Frame
315Relay.
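
In outline, the relationship looks like this. This is a simplified
editorial sketch; field names are illustrative and the real
definitions in net/l2tp/l2tp_core.h carry many more fields.

struct l2tp_tunnel {
	struct hlist_head session_hlist[L2TP_HASH_SIZE]; /* sessions in tunnel */
	u32 tunnel_id;
	u32 peer_tunnel_id;
	int encap;		/* UDP or L2TPIP encapsulation */
	struct sock *sock;	/* tunnel socket, pinned with sock_hold() */
};

struct l2tp_session {
	struct l2tp_tunnel *tunnel;	/* back-pointer to parent tunnel */
	struct hlist_node hlist;	/* entry in tunnel's session list */
	u32 session_id;
	u32 peer_session_id;
	atomic_t ref_count;	/* L2TP's own refcount; a session may
				 * have no socket of its own */
	u16 pwtype;		/* pseudowire type: PPP, ethernet, ... */
	u8 priv[0];		/* pseudowire-specific private data */
};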
316
317When a tunnel is first opened, the reference count on the socket is
318increased using sock_hold(). This ensures that the kernel socket
319cannot be removed while L2TP's data structures reference it.
320
321Some L2TP sessions also have a socket (PPP pseudowires) while others
322do not (ethernet pseudowires). We can't use the socket reference count
323as the reference count for session contexts. The L2TP implementation
324therefore has its own internal reference counts on the session
325contexts.
326
327To Do
328=====
329
330Add L2TP tunnel switching support. This would route tunneled traffic
331from one L2TP tunnel into another, as specified in
332http://tools.ietf.org/html/draft-ietf-l2tpext-tunnel-switching-08
333
334Add L2TPv3 VLAN pseudowire support.
335
336Add L2TPv3 IP pseudowire support.
337
338Add L2TPv3 ATM pseudowire support.
339
161Miscellaneous 340Miscellaneous
162============ 341=============
163 342
164The PPPoL2TP driver was developed as part of the OpenL2TP project by 343The L2TP drivers were developed as part of the OpenL2TP project by
165Katalix Systems Ltd. OpenL2TP is a full-featured L2TP client / server, 344Katalix Systems Ltd. OpenL2TP is a full-featured L2TP client / server,
166designed from the ground up to have the L2TP datapath in the 345designed from the ground up to have the L2TP datapath in the
167kernel. The project also implemented the pppol2tp plugin for pppd 346kernel. The project also implemented the pppol2tp plugin for pppd
168which allows pppd to use the kernel driver. Details can be found at 347which allows pppd to use the kernel driver. Details can be found at
169http://openl2tp.sourceforge.net. 348http://www.openl2tp.org.
diff --git a/Documentation/rfkill.txt b/Documentation/rfkill.txt
index b4860509c319..83668e5dd17f 100644
--- a/Documentation/rfkill.txt
+++ b/Documentation/rfkill.txt
@@ -99,37 +99,15 @@ system. Also, it is possible to switch all rfkill drivers (or all drivers of
99a specified type) into a state which also updates the default state for 99a specified type) into a state which also updates the default state for
100hotplugged devices. 100hotplugged devices.
101 101
102After an application opens /dev/rfkill, it can read the current state of
103all devices, and afterwards can poll the descriptor for hotplug or state
104change events.
105
106Applications must ignore operations (the "op" field) they do not handle,
107this allows the API to be extended in the future.
108
109Additionally, each rfkill device is registered in sysfs and there has the
110following attributes:
111
112 name: Name assigned by driver to this key (interface or driver name).
113 type: Driver type string ("wlan", "bluetooth", etc).
114 persistent: Whether the soft blocked state is initialised from
115 non-volatile storage at startup.
116 state: Current state of the transmitter
117 0: RFKILL_STATE_SOFT_BLOCKED
118 transmitter is turned off by software
119 1: RFKILL_STATE_UNBLOCKED
120 transmitter is (potentially) active
121 2: RFKILL_STATE_HARD_BLOCKED
122 transmitter is forced off by something outside of
123 the driver's control.
124 This file is deprecated because it can only properly show
125 three of the four possible states, soft-and-hard-blocked is
126 missing.
127 claim: 0: Kernel handles events
128 This file is deprecated because there no longer is a way to
129 claim just control over a single rfkill instance.
130
131rfkill devices also issue uevents (with an action of "change"), with the
132following environment variables set:
102After an application opens /dev/rfkill, it can read the current state of all
103devices. Changes can be obtained either by polling the descriptor for
104hotplug or state change events or by listening for uevents emitted by the
105rfkill core framework.
106
107Additionally, each rfkill device is registered in sysfs and emits uevents.
108
109rfkill devices issue uevents (with an action of "change"), with the following
110environment variables set:
133 111
134RFKILL_NAME 112RFKILL_NAME
135RFKILL_STATE 113RFKILL_STATE
@@ -137,3 +115,7 @@ RFKILL_TYPE
137 115
138The contents of these variables correspond to the "name", "state" and 116The contents of these variables correspond to the "name", "state" and
139"type" sysfs files explained above. 117"type" sysfs files explained above.
118
119
120For further details consult Documentation/ABI/stable/dev-rfkill and
121Documentation/ABI/stable/sysfs-class-rfkill.
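
As a usage illustration of the /dev/rfkill read interface described
above (an editorial sketch, not part of this patch; struct
rfkill_event comes from linux/rfkill.h):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event ev;
	int fd = open("/dev/rfkill", O_RDONLY);

	if (fd < 0)
		return 1;
	/* The kernel first replays the current state of every rfkill
	 * device, then blocks until a change event arrives. */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("idx %u type %u op %u soft %u hard %u\n",
		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);
	close(fd);
	return 0;
}

An application reading events this way should ignore operations (the
"op" field) it does not recognise, so the API can be extended.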
diff --git a/MAINTAINERS b/MAINTAINERS
index a0e3c3a47a51..405aea02dfbf 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1501,9 +1501,10 @@ M: Andy Whitcroft <apw@canonical.com>
1501S: Supported 1501S: Supported
1502F: scripts/checkpatch.pl 1502F: scripts/checkpatch.pl
1503 1503
1504CISCO 10G ETHERNET DRIVER 1504CISCO VIC ETHERNET NIC DRIVER
1505M: Scott Feldman <scofeldm@cisco.com> 1505M: Scott Feldman <scofeldm@cisco.com>
1506M: Joe Eykholt <jeykholt@cisco.com> 1506M: Vasanthy Kolluri <vkolluri@cisco.com>
1507M: Roopa Prabhu <roprabhu@cisco.com>
1507S: Supported 1508S: Supported
1508F: drivers/net/enic/ 1509F: drivers/net/enic/
1509 1510
@@ -3002,10 +3003,9 @@ F: net/ipv4/netfilter/ipt_MASQUERADE.c
3002IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER 3003IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
3003M: Francois Romieu <romieu@fr.zoreil.com> 3004M: Francois Romieu <romieu@fr.zoreil.com>
3004M: Sorbica Shieh <sorbica@icplus.com.tw> 3005M: Sorbica Shieh <sorbica@icplus.com.tw>
3005M: Jesse Huang <jesse@icplus.com.tw>
3006L: netdev@vger.kernel.org 3006L: netdev@vger.kernel.org
3007S: Maintained 3007S: Maintained
3008F: drivers/net/ipg.c 3008F: drivers/net/ipg.*
3009 3009
3010IPATH DRIVER 3010IPATH DRIVER
3011M: Ralph Campbell <infinipath@qlogic.com> 3011M: Ralph Campbell <infinipath@qlogic.com>
@@ -3852,7 +3852,6 @@ M: Ramkrishna Vepa <ram.vepa@neterion.com>
3852M: Rastapur Santosh <santosh.rastapur@neterion.com> 3852M: Rastapur Santosh <santosh.rastapur@neterion.com>
3853M: Sivakumar Subramani <sivakumar.subramani@neterion.com> 3853M: Sivakumar Subramani <sivakumar.subramani@neterion.com>
3854M: Sreenivasa Honnur <sreenivasa.honnur@neterion.com> 3854M: Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
3855M: Anil Murthy <anil.murthy@neterion.com>
3856L: netdev@vger.kernel.org 3855L: netdev@vger.kernel.org
3857W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous 3856W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
3858W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous 3857W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
diff --git a/arch/arm/mach-pxa/icontrol.c b/arch/arm/mach-pxa/icontrol.c
index 771137fc1a82..5ccb0ceff6c4 100644
--- a/arch/arm/mach-pxa/icontrol.c
+++ b/arch/arm/mach-pxa/icontrol.c
@@ -73,7 +73,6 @@ static struct pxa2xx_spi_chip mcp251x_chip_info4 = {
73 73
74static struct mcp251x_platform_data mcp251x_info = { 74static struct mcp251x_platform_data mcp251x_info = {
75 .oscillator_frequency = 16E6, 75 .oscillator_frequency = 16E6,
76 .model = CAN_MCP251X_MCP2515,
77 .board_specific_setup = NULL, 76 .board_specific_setup = NULL,
78 .power_enable = NULL, 77 .power_enable = NULL,
79 .transceiver_enable = NULL 78 .transceiver_enable = NULL
@@ -81,7 +80,7 @@ static struct mcp251x_platform_data mcp251x_info = {
81 80
82static struct spi_board_info mcp251x_board_info[] = { 81static struct spi_board_info mcp251x_board_info[] = {
83 { 82 {
84 .modalias = "mcp251x", 83 .modalias = "mcp2515",
85 .max_speed_hz = 6500000, 84 .max_speed_hz = 6500000,
86 .bus_num = 3, 85 .bus_num = 3,
87 .chip_select = 0, 86 .chip_select = 0,
@@ -90,7 +89,7 @@ static struct spi_board_info mcp251x_board_info[] = {
90 .irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ1) 89 .irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ1)
91 }, 90 },
92 { 91 {
93 .modalias = "mcp251x", 92 .modalias = "mcp2515",
94 .max_speed_hz = 6500000, 93 .max_speed_hz = 6500000,
95 .bus_num = 3, 94 .bus_num = 3,
96 .chip_select = 1, 95 .chip_select = 1,
@@ -99,7 +98,7 @@ static struct spi_board_info mcp251x_board_info[] = {
99 .irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ2) 98 .irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ2)
100 }, 99 },
101 { 100 {
102 .modalias = "mcp251x", 101 .modalias = "mcp2515",
103 .max_speed_hz = 6500000, 102 .max_speed_hz = 6500000,
104 .bus_num = 4, 103 .bus_num = 4,
105 .chip_select = 0, 104 .chip_select = 0,
@@ -108,7 +107,7 @@ static struct spi_board_info mcp251x_board_info[] = {
108 .irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ3) 107 .irq = gpio_to_irq(ICONTROL_MCP251x_nIRQ3)
109 }, 108 },
110 { 109 {
111 .modalias = "mcp251x", 110 .modalias = "mcp2515",
112 .max_speed_hz = 6500000, 111 .max_speed_hz = 6500000,
113 .bus_num = 4, 112 .bus_num = 4,
114 .chip_select = 1, 113 .chip_select = 1,
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 39896d883584..dbd256966379 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -414,15 +414,13 @@ static int zeus_mcp2515_transceiver_enable(int enable)
414 414
415static struct mcp251x_platform_data zeus_mcp2515_pdata = { 415static struct mcp251x_platform_data zeus_mcp2515_pdata = {
416 .oscillator_frequency = 16*1000*1000, 416 .oscillator_frequency = 16*1000*1000,
417 .model = CAN_MCP251X_MCP2515,
418 .board_specific_setup = zeus_mcp2515_setup, 417 .board_specific_setup = zeus_mcp2515_setup,
419 .transceiver_enable = zeus_mcp2515_transceiver_enable,
420 .power_enable = zeus_mcp2515_transceiver_enable, 418 .power_enable = zeus_mcp2515_transceiver_enable,
421}; 419};
422 420
423static struct spi_board_info zeus_spi_board_info[] = { 421static struct spi_board_info zeus_spi_board_info[] = {
424 [0] = { 422 [0] = {
425 .modalias = "mcp251x", 423 .modalias = "mcp2515",
426 .platform_data = &zeus_mcp2515_pdata, 424 .platform_data = &zeus_mcp2515_pdata,
427 .irq = gpio_to_irq(ZEUS_CAN_GPIO), 425 .irq = gpio_to_irq(ZEUS_CAN_GPIO),
428 .max_speed_hz = 1*1000*1000, 426 .max_speed_hz = 1*1000*1000,
diff --git a/arch/microblaze/include/asm/system.h b/arch/microblaze/include/asm/system.h
index 59efb3fef957..48c4f0335e3f 100644
--- a/arch/microblaze/include/asm/system.h
+++ b/arch/microblaze/include/asm/system.h
@@ -12,6 +12,7 @@
12#include <asm/registers.h> 12#include <asm/registers.h>
13#include <asm/setup.h> 13#include <asm/setup.h>
14#include <asm/irqflags.h> 14#include <asm/irqflags.h>
15#include <asm/cache.h>
15 16
16#include <asm-generic/cmpxchg.h> 17#include <asm-generic/cmpxchg.h>
17#include <asm-generic/cmpxchg-local.h> 18#include <asm-generic/cmpxchg-local.h>
@@ -96,4 +97,14 @@ extern struct dentry *of_debugfs_root;
96 97
97#define arch_align_stack(x) (x) 98#define arch_align_stack(x) (x)
98 99
100/*
101 * MicroBlaze doesn't handle unaligned accesses in hardware.
102 *
103 * Based on this we force the IP header alignment in network drivers.
104 * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
105 * cacheline alignment of buffers.
106 */
107#define NET_IP_ALIGN 2
108#define NET_SKB_PAD L1_CACHE_BYTES
109
99#endif /* _ASM_MICROBLAZE_SYSTEM_H */ 110#endif /* _ASM_MICROBLAZE_SYSTEM_H */
diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
index b86712167eb8..b9101818b47b 100644
--- a/drivers/atm/atmtcp.c
+++ b/drivers/atm/atmtcp.c
@@ -68,7 +68,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
68 *(struct atm_vcc **) &new_msg->vcc = vcc; 68 *(struct atm_vcc **) &new_msg->vcc = vcc;
69 old_test = test_bit(flag,&vcc->flags); 69 old_test = test_bit(flag,&vcc->flags);
70 out_vcc->push(out_vcc,skb); 70 out_vcc->push(out_vcc,skb);
71 add_wait_queue(sk_atm(vcc)->sk_sleep, &wait); 71 add_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
72 while (test_bit(flag,&vcc->flags) == old_test) { 72 while (test_bit(flag,&vcc->flags) == old_test) {
73 mb(); 73 mb();
74 out_vcc = PRIV(vcc->dev) ? PRIV(vcc->dev)->vcc : NULL; 74 out_vcc = PRIV(vcc->dev) ? PRIV(vcc->dev)->vcc : NULL;
@@ -80,7 +80,7 @@ static int atmtcp_send_control(struct atm_vcc *vcc,int type,
80 schedule(); 80 schedule();
81 } 81 }
82 set_current_state(TASK_RUNNING); 82 set_current_state(TASK_RUNNING);
83 remove_wait_queue(sk_atm(vcc)->sk_sleep, &wait); 83 remove_wait_queue(sk_sleep(sk_atm(vcc)), &wait);
84 return error; 84 return error;
85} 85}
86 86
@@ -105,7 +105,7 @@ static int atmtcp_recv_control(const struct atmtcp_control *msg)
105 msg->type); 105 msg->type);
106 return -EINVAL; 106 return -EINVAL;
107 } 107 }
108 wake_up(sk_atm(vcc)->sk_sleep); 108 wake_up(sk_sleep(sk_atm(vcc)));
109 return 0; 109 return 0;
110} 110}
111 111
diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
index 719ec5a0dca5..90a5a7cac740 100644
--- a/drivers/atm/eni.c
+++ b/drivers/atm/eni.c
@@ -1131,7 +1131,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
1131 if (i == -1) 1131 if (i == -1)
1132 put_dma(tx->index,eni_dev->dma,&j,(unsigned long) 1132 put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
1133 skb->data, 1133 skb->data,
1134 skb->len - skb->data_len); 1134 skb_headlen(skb));
1135 else 1135 else
1136 put_dma(tx->index,eni_dev->dma,&j,(unsigned long) 1136 put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
1137 skb_shinfo(skb)->frags[i].page + skb_shinfo(skb)->frags[i].page_offset, 1137 skb_shinfo(skb)->frags[i].page + skb_shinfo(skb)->frags[i].page_offset,
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index c213e0da0343..56c2e99e458f 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2664,8 +2664,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
2664 2664
2665#ifdef USE_SCATTERGATHER 2665#ifdef USE_SCATTERGATHER
2666 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data, 2666 tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
2667 skb->len - skb->data_len, PCI_DMA_TODEVICE); 2667 skb_headlen(skb), PCI_DMA_TODEVICE);
2668 tpd->iovec[slot].len = skb->len - skb->data_len; 2668 tpd->iovec[slot].len = skb_headlen(skb);
2669 ++slot; 2669 ++slot;
2670 2670
2671 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2671 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index b7c813f4be43..65369d3fad81 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -877,7 +877,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
877 if (!mc_all_on) { 877 if (!mc_all_on) {
878 char *addrs; 878 char *addrs;
879 int i; 879 int i;
880 struct dev_mc_list *mcaddr; 880 struct netdev_hw_addr *ha;
881 881
882 addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC); 882 addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
883 if (!addrs) { 883 if (!addrs) {
@@ -885,9 +885,8 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
885 goto unlock; 885 goto unlock;
886 } 886 }
887 i = 0; 887 i = 0;
888 netdev_for_each_mc_addr(mcaddr, netdev) 888 netdev_for_each_mc_addr(ha, netdev)
889 memcpy(get_addr(addrs, i++), 889 memcpy(get_addr(addrs, i++), ha->addr, ETH_ALEN);
890 mcaddr->dmi_addr, ETH_ALEN);
891 890
892 perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW + 891 perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
893 pft_entries_preallocated * 0x8; 892 pft_entries_preallocated * 0x8;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index b166bb75753d..3871ac663554 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -768,11 +768,8 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
768 } 768 }
769} 769}
770 770
771static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen, 771static int ipoib_mcast_addr_is_valid(const u8 *addr, const u8 *broadcast)
772 const u8 *broadcast)
773{ 772{
774 if (addrlen != INFINIBAND_ALEN)
775 return 0;
776 /* reserved QPN, prefix, scope */ 773 /* reserved QPN, prefix, scope */
777 if (memcmp(addr, broadcast, 6)) 774 if (memcmp(addr, broadcast, 6))
778 return 0; 775 return 0;
@@ -787,7 +784,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
787 struct ipoib_dev_priv *priv = 784 struct ipoib_dev_priv *priv =
788 container_of(work, struct ipoib_dev_priv, restart_task); 785 container_of(work, struct ipoib_dev_priv, restart_task);
789 struct net_device *dev = priv->dev; 786 struct net_device *dev = priv->dev;
790 struct dev_mc_list *mclist; 787 struct netdev_hw_addr *ha;
791 struct ipoib_mcast *mcast, *tmcast; 788 struct ipoib_mcast *mcast, *tmcast;
792 LIST_HEAD(remove_list); 789 LIST_HEAD(remove_list);
793 unsigned long flags; 790 unsigned long flags;
@@ -812,15 +809,13 @@ void ipoib_mcast_restart_task(struct work_struct *work)
812 clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags); 809 clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
813 810
814 /* Mark all of the entries that are found or don't exist */ 811 /* Mark all of the entries that are found or don't exist */
815 netdev_for_each_mc_addr(mclist, dev) { 812 netdev_for_each_mc_addr(ha, dev) {
816 union ib_gid mgid; 813 union ib_gid mgid;
817 814
818 if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr, 815 if (!ipoib_mcast_addr_is_valid(ha->addr, dev->broadcast))
819 mclist->dmi_addrlen,
820 dev->broadcast))
821 continue; 816 continue;
822 817
823 memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid); 818 memcpy(mgid.raw, ha->addr + 4, sizeof mgid);
824 819
825 mcast = __ipoib_mcast_find(dev, &mgid); 820 mcast = __ipoib_mcast_find(dev, &mgid);
826 if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) { 821 if (!mcast || test_bit(IPOIB_MCAST_FLAG_SENDONLY, &mcast->flags)) {
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 441c0642b30a..cccea412088b 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1109,14 +1109,14 @@ static int dvb_net_feed_stop(struct net_device *dev)
1109} 1109}
1110 1110
1111 1111
1112static int dvb_set_mc_filter (struct net_device *dev, struct dev_mc_list *mc) 1112static int dvb_set_mc_filter(struct net_device *dev, unsigned char *addr)
1113{ 1113{
1114 struct dvb_net_priv *priv = netdev_priv(dev); 1114 struct dvb_net_priv *priv = netdev_priv(dev);
1115 1115
1116 if (priv->multi_num == DVB_NET_MULTICAST_MAX) 1116 if (priv->multi_num == DVB_NET_MULTICAST_MAX)
1117 return -ENOMEM; 1117 return -ENOMEM;
1118 1118
1119 memcpy(priv->multi_macs[priv->multi_num], mc->dmi_addr, 6); 1119 memcpy(priv->multi_macs[priv->multi_num], addr, ETH_ALEN);
1120 1120
1121 priv->multi_num++; 1121 priv->multi_num++;
1122 return 0; 1122 return 0;
@@ -1140,8 +1140,7 @@ static void wq_set_multicast_list (struct work_struct *work)
1140 dprintk("%s: allmulti mode\n", dev->name); 1140 dprintk("%s: allmulti mode\n", dev->name);
1141 priv->rx_mode = RX_MODE_ALL_MULTI; 1141 priv->rx_mode = RX_MODE_ALL_MULTI;
1142 } else if (!netdev_mc_empty(dev)) { 1142 } else if (!netdev_mc_empty(dev)) {
1143 int mci; 1143 struct netdev_hw_addr *ha;
1144 struct dev_mc_list *mc;
1145 1144
1146 dprintk("%s: set_mc_list, %d entries\n", 1145 dprintk("%s: set_mc_list, %d entries\n",
1147 dev->name, netdev_mc_count(dev)); 1146 dev->name, netdev_mc_count(dev));
@@ -1149,11 +1148,8 @@ static void wq_set_multicast_list (struct work_struct *work)
1149 priv->rx_mode = RX_MODE_MULTI; 1148 priv->rx_mode = RX_MODE_MULTI;
1150 priv->multi_num = 0; 1149 priv->multi_num = 0;
1151 1150
1152 for (mci = 0, mc=dev->mc_list; 1151 netdev_for_each_mc_addr(ha, dev)
1153 mci < netdev_mc_count(dev); 1152 dvb_set_mc_filter(dev, ha->addr);
1154 mc = mc->next, mci++) {
1155 dvb_set_mc_filter(dev, mc);
1156 }
1157 } 1153 }
1158 1154
1159 netif_addr_unlock_bh(dev); 1155 netif_addr_unlock_bh(dev);
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 66e0323c1839..b74a0eadbd6c 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -380,6 +380,12 @@ out:
380 return retval; 380 return retval;
381} 381}
382 382
383static irqreturn_t el2_probe_interrupt(int irq, void *seen)
384{
385 *(bool *)seen = true;
386 return IRQ_HANDLED;
387}
388
383static int 389static int
384el2_open(struct net_device *dev) 390el2_open(struct net_device *dev)
385{ 391{
@@ -391,23 +397,35 @@ el2_open(struct net_device *dev)
391 397
392 outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */ 398 outb(EGACFR_NORM, E33G_GACFR); /* Enable RAM and interrupts. */
393 do { 399 do {
394 retval = request_irq(*irqp, NULL, 0, "bogus", dev); 400 bool seen;
395 if (retval >= 0) { 401
402 retval = request_irq(*irqp, el2_probe_interrupt, 0,
403 dev->name, &seen);
404 if (retval == -EBUSY)
405 continue;
406 if (retval < 0)
407 goto err_disable;
408
396 /* Twinkle the interrupt, and check if it's seen. */ 409 /* Twinkle the interrupt, and check if it's seen. */
397 unsigned long cookie = probe_irq_on(); 410 seen = false;
411 smp_wmb();
398 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR); 412 outb_p(0x04 << ((*irqp == 9) ? 2 : *irqp), E33G_IDCFR);
399 outb_p(0x00, E33G_IDCFR); 413 outb_p(0x00, E33G_IDCFR);
400 if (*irqp == probe_irq_off(cookie) && /* It's a good IRQ line! */ 414 msleep(1);
401 ((retval = request_irq(dev->irq = *irqp, 415 free_irq(*irqp, el2_probe_interrupt);
402 eip_interrupt, 0, 416 if (!seen)
403 dev->name, dev)) == 0)) 417 continue;
404 break; 418
405 } else { 419 retval = request_irq(dev->irq = *irqp, eip_interrupt, 0,
406 if (retval != -EBUSY) 420 dev->name, dev);
407 return retval; 421 if (retval == -EBUSY)
408 } 422 continue;
423 if (retval < 0)
424 goto err_disable;
409 } while (*++irqp); 425 } while (*++irqp);
426
410 if (*irqp == 0) { 427 if (*irqp == 0) {
428 err_disable:
411 outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */ 429 outb(EGACFR_IRQOFF, E33G_GACFR); /* disable interrupts. */
412 return -EAGAIN; 430 return -EAGAIN;
413 } 431 }
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 29b8d1d63bde..8d584f5fd02d 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1216,7 +1216,7 @@ static int elp_close(struct net_device *dev)
1216static void elp_set_mc_list(struct net_device *dev) 1216static void elp_set_mc_list(struct net_device *dev)
1217{ 1217{
1218 elp_device *adapter = netdev_priv(dev); 1218 elp_device *adapter = netdev_priv(dev);
1219 struct dev_mc_list *dmi; 1219 struct netdev_hw_addr *ha;
1220 int i; 1220 int i;
1221 unsigned long flags; 1221 unsigned long flags;
1222 1222
@@ -1231,8 +1231,9 @@ static void elp_set_mc_list(struct net_device *dev)
1231 adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST; 1231 adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
1232 adapter->tx_pcb.length = 6 * netdev_mc_count(dev); 1232 adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
1233 i = 0; 1233 i = 0;
1234 netdev_for_each_mc_addr(dmi, dev) 1234 netdev_for_each_mc_addr(ha, dev)
1235 memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6); 1235 memcpy(adapter->tx_pcb.data.multicast[i++],
1236 ha->addr, 6);
1236 adapter->got[CMD_LOAD_MULTICAST_LIST] = 0; 1237 adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
1237 if (!send_pcb(dev, &adapter->tx_pcb)) 1238 if (!send_pcb(dev, &adapter->tx_pcb))
1238 pr_err("%s: couldn't send set_multicast command\n", dev->name); 1239 pr_err("%s: couldn't send set_multicast command\n", dev->name);
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 1719079cc498..8c70686d43a1 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -624,7 +624,7 @@ static int init586(struct net_device *dev)
624 volatile struct iasetup_cmd_struct *ias_cmd; 624 volatile struct iasetup_cmd_struct *ias_cmd;
625 volatile struct tdr_cmd_struct *tdr_cmd; 625 volatile struct tdr_cmd_struct *tdr_cmd;
626 volatile struct mcsetup_cmd_struct *mc_cmd; 626 volatile struct mcsetup_cmd_struct *mc_cmd;
627 struct dev_mc_list *dmi; 627 struct netdev_hw_addr *ha;
628 int num_addrs = netdev_mc_count(dev); 628 int num_addrs = netdev_mc_count(dev);
629 629
630 ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct)); 630 ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -787,8 +787,9 @@ static int init586(struct net_device *dev)
787 mc_cmd->cmd_link = 0xffff; 787 mc_cmd->cmd_link = 0xffff;
788 mc_cmd->mc_cnt = num_addrs * 6; 788 mc_cmd->mc_cnt = num_addrs * 6;
789 i = 0; 789 i = 0;
790 netdev_for_each_mc_addr(dmi, dev) 790 netdev_for_each_mc_addr(ha, dev)
791 memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6); 791 memcpy((char *) mc_cmd->mc_list[i++],
792 ha->addr, 6);
792 p->scb->cbl_offset = make16(mc_cmd); 793 p->scb->cbl_offset = make16(mc_cmd);
793 p->scb->cmd = CUC_START; 794 p->scb->cmd = CUC_START;
794 elmc_id_attn586(); 795 elmc_id_attn586();
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 5c07b147ec99..38395dfa4963 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1533,7 +1533,7 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1533 { 1533 {
1534 unsigned char block[62]; 1534 unsigned char block[62];
1535 unsigned char *bp; 1535 unsigned char *bp;
1536 struct dev_mc_list *dmc; 1536 struct netdev_hw_addr *ha;
1537 1537
1538 if(retry==0) 1538 if(retry==0)
1539 lp->mc_list_valid = 0; 1539 lp->mc_list_valid = 0;
@@ -1543,8 +1543,8 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
1543 block[0]=netdev_mc_count(dev); 1543 block[0]=netdev_mc_count(dev);
1544 bp=block+2; 1544 bp=block+2;
1545 1545
1546 netdev_for_each_mc_addr(dmc, dev) { 1546 netdev_for_each_mc_addr(ha, dev) {
1547 memcpy(bp, dmc->dmi_addr, 6); 1547 memcpy(bp, ha->addr, 6);
1548 bp+=6; 1548 bp+=6;
1549 } 1549 }
1550 if(mc32_command_nowait(dev, 2, block, 1550 if(mc32_command_nowait(dev, 2, block,
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 5f92fdbe66e2..97525307ed27 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2129,8 +2129,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2129 int i; 2129 int i;
2130 2130
2131 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, 2131 vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
2132 skb->len-skb->data_len, PCI_DMA_TODEVICE)); 2132 skb_headlen(skb), PCI_DMA_TODEVICE));
2133 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len); 2133 vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
2134 2134
2135 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 2135 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2136 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 2136 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index 500e135723bd..f09e59451495 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -594,7 +594,7 @@ static void lance_load_multicast (struct net_device *dev)
594 struct lance_private *lp = netdev_priv(dev); 594 struct lance_private *lp = netdev_priv(dev);
595 volatile struct lance_init_block *ib = lp->init_block; 595 volatile struct lance_init_block *ib = lp->init_block;
596 volatile u16 *mcast_table = (u16 *)&ib->filter; 596 volatile u16 *mcast_table = (u16 *)&ib->filter;
597 struct dev_mc_list *dmi; 597 struct netdev_hw_addr *ha;
598 char *addrs; 598 char *addrs;
599 u32 crc; 599 u32 crc;
600 600
@@ -609,8 +609,8 @@ static void lance_load_multicast (struct net_device *dev)
609 ib->filter [1] = 0; 609 ib->filter [1] = 0;
610 610
611 /* Add addresses */ 611 /* Add addresses */
612 netdev_for_each_mc_addr(dmi, dev) { 612 netdev_for_each_mc_addr(ha, dev) {
613 addrs = dmi->dmi_addr; 613 addrs = ha->addr;
614 614
615 /* multicast address? */ 615 /* multicast address? */
616 if (!(*addrs & 1)) 616 if (!(*addrs & 1))
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a09e6ce3eaa0..4e8d11cfe477 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -910,11 +910,11 @@ static void __cp_set_rx_mode (struct net_device *dev)
910 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 910 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
911 mc_filter[1] = mc_filter[0] = 0xffffffff; 911 mc_filter[1] = mc_filter[0] = 0xffffffff;
912 } else { 912 } else {
913 struct dev_mc_list *mclist; 913 struct netdev_hw_addr *ha;
914 rx_mode = AcceptBroadcast | AcceptMyPhys; 914 rx_mode = AcceptBroadcast | AcceptMyPhys;
915 mc_filter[1] = mc_filter[0] = 0; 915 mc_filter[1] = mc_filter[0] = 0;
916 netdev_for_each_mc_addr(mclist, dev) { 916 netdev_for_each_mc_addr(ha, dev) {
917 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 917 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
918 918
919 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 919 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
920 rx_mode |= AcceptMulticast; 920 rx_mode |= AcceptMulticast;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f0d23de32967..d0cb372a0f0d 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2503,11 +2503,11 @@ static void __set_rx_mode (struct net_device *dev)
2503 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 2503 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
2504 mc_filter[1] = mc_filter[0] = 0xffffffff; 2504 mc_filter[1] = mc_filter[0] = 0xffffffff;
2505 } else { 2505 } else {
2506 struct dev_mc_list *mclist; 2506 struct netdev_hw_addr *ha;
2507 rx_mode = AcceptBroadcast | AcceptMyPhys; 2507 rx_mode = AcceptBroadcast | AcceptMyPhys;
2508 mc_filter[1] = mc_filter[0] = 0; 2508 mc_filter[1] = mc_filter[0] = 0;
2509 netdev_for_each_mc_addr(mclist, dev) { 2509 netdev_for_each_mc_addr(ha, dev) {
2510 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 2510 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2511 2511
2512 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 2512 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2513 rx_mode |= AcceptMulticast; 2513 rx_mode |= AcceptMulticast;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 56e68db48861..97c5fc019cd4 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1542,7 +1542,7 @@ static void set_multicast_list(struct net_device *dev)
1542 } 1542 }
1543 1543
1544 if (!netdev_mc_empty(dev)) { 1544 if (!netdev_mc_empty(dev)) {
1545 struct dev_mc_list *dmi; 1545 struct netdev_hw_addr *ha;
1546 unsigned char *cp; 1546 unsigned char *cp;
1547 struct mc_cmd *cmd; 1547 struct mc_cmd *cmd;
1548 1548
@@ -1552,10 +1552,10 @@ static void set_multicast_list(struct net_device *dev)
1552 cmd->cmd.command = CmdMulticastList; 1552 cmd->cmd.command = CmdMulticastList;
1553 cmd->mc_cnt = cnt * ETH_ALEN; 1553 cmd->mc_cnt = cnt * ETH_ALEN;
1554 cp = cmd->mc_addrs; 1554 cp = cmd->mc_addrs;
1555 netdev_for_each_mc_addr(dmi, dev) { 1555 netdev_for_each_mc_addr(ha, dev) {
1556 if (!cnt--) 1556 if (!cnt--)
1557 break; 1557 break;
1558 memcpy(cp, dmi->dmi_addr, ETH_ALEN); 1558 memcpy(cp, ha->addr, ETH_ALEN);
1559 if (i596_debug > 1) 1559 if (i596_debug > 1)
1560 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n", 1560 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
1561 dev->name, cp)); 1561 dev->name, cp));
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 7b832c727f87..dbd26f992158 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -483,7 +483,7 @@ config XTENSA_XT2000_SONIC
483 This is the driver for the onboard card of the Xtensa XT2000 board. 483 This is the driver for the onboard card of the Xtensa XT2000 board.
484 484
485config MIPS_AU1X00_ENET 485config MIPS_AU1X00_ENET
486 bool "MIPS AU1000 Ethernet support" 486 tristate "MIPS AU1000 Ethernet support"
487 depends on SOC_AU1X00 487 depends on SOC_AU1X00
488 select PHYLIB 488 select PHYLIB
489 select CRC32 489 select CRC32
@@ -1916,6 +1916,7 @@ config FEC
1916 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)" 1916 bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
1917 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \ 1917 depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
1918 MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 1918 MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
1919 select PHYLIB
1919 help 1920 help
1920 Say Y here if you want to use the built-in 10/100 Fast ethernet 1921 Say Y here if you want to use the built-in 10/100 Fast ethernet
1921 controller on some Motorola ColdFire and Freescale i.MX processors. 1922 controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -2434,8 +2435,8 @@ config MV643XX_ETH
2434 2435
2435config XILINX_LL_TEMAC 2436config XILINX_LL_TEMAC
2436 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" 2437 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
2438 depends on PPC || MICROBLAZE
2437 select PHYLIB 2439 select PHYLIB
2438 depends on PPC_DCR_NATIVE
2439 help 2440 help
2440 This driver supports the Xilinx 10/100/1000 LocalLink TEMAC 2441 This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
2441 core used in Xilinx Spartan and Virtex FPGAs 2442 core used in Xilinx Spartan and Virtex FPGAs
@@ -2618,11 +2619,11 @@ config EHEA
2618 will be called ehea. 2619 will be called ehea.
2619 2620
2620config ENIC 2621config ENIC
2621 tristate "Cisco 10G Ethernet NIC support" 2622 tristate "Cisco VIC Ethernet NIC Support"
2622 depends on PCI && INET 2623 depends on PCI && INET
2623 select INET_LRO 2624 select INET_LRO
2624 help 2625 help
2625 This enables the support for the Cisco 10G Ethernet card. 2626 This enables the support for the Cisco VIC Ethernet card.
2626 2627
2627config IXGBE 2628config IXGBE
2628 tristate "Intel(R) 10GbE PCI Express adapters support" 2629 tristate "Intel(R) 10GbE PCI Express adapters support"
@@ -2862,6 +2863,8 @@ source "drivers/ieee802154/Kconfig"
2862 2863
2863source "drivers/s390/net/Kconfig" 2864source "drivers/s390/net/Kconfig"
2864 2865
2866source "drivers/net/caif/Kconfig"
2867
2865config XEN_NETDEV_FRONTEND 2868config XEN_NETDEV_FRONTEND
2866 tristate "Xen network device frontend driver" 2869 tristate "Xen network device frontend driver"
2867 depends on XEN 2870 depends on XEN
@@ -3180,17 +3183,12 @@ config PPPOATM
3180 3183
3181config PPPOL2TP 3184config PPPOL2TP
3182 tristate "PPP over L2TP (EXPERIMENTAL)" 3185 tristate "PPP over L2TP (EXPERIMENTAL)"
3183 depends on EXPERIMENTAL && PPP && INET 3186 depends on EXPERIMENTAL && L2TP && PPP
3184 help 3187 help
3185 Support for PPP-over-L2TP socket family. L2TP is a protocol 3188 Support for PPP-over-L2TP socket family. L2TP is a protocol
3186 used by ISPs and enterprises to tunnel PPP traffic over UDP 3189 used by ISPs and enterprises to tunnel PPP traffic over UDP
3187 tunnels. L2TP is replacing PPTP for VPN uses. 3190 tunnels. L2TP is replacing PPTP for VPN uses.
3188 3191
3189 This kernel component handles only L2TP data packets: a
3190 userland daemon handles L2TP the control protocol (tunnel
3191 and session setup). One such daemon is OpenL2TP
3192 (http://openl2tp.sourceforge.net/).
3193
3194config SLIP 3192config SLIP
3195 tristate "SLIP (serial line) support" 3193 tristate "SLIP (serial line) support"
3196 ---help--- 3194 ---help---
@@ -3277,15 +3275,14 @@ config NET_FC
3277 "SCSI generic support". 3275 "SCSI generic support".
3278 3276
3279config NETCONSOLE 3277config NETCONSOLE
3280 tristate "Network console logging support (EXPERIMENTAL)" 3278 tristate "Network console logging support"
3281 depends on EXPERIMENTAL
3282 ---help--- 3279 ---help---
3283 If you want to log kernel messages over the network, enable this. 3280 If you want to log kernel messages over the network, enable this.
3284 See <file:Documentation/networking/netconsole.txt> for details. 3281 See <file:Documentation/networking/netconsole.txt> for details.
3285 3282
3286config NETCONSOLE_DYNAMIC 3283config NETCONSOLE_DYNAMIC
3287 bool "Dynamic reconfiguration of logging targets (EXPERIMENTAL)" 3284 bool "Dynamic reconfiguration of logging targets"
3288 depends on NETCONSOLE && SYSFS && EXPERIMENTAL 3285 depends on NETCONSOLE && SYSFS
3289 select CONFIGFS_FS 3286 select CONFIGFS_FS
3290 help 3287 help
3291 This option enables the ability to dynamically reconfigure target 3288 This option enables the ability to dynamically reconfigure target
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a583b50d9de8..ebf80b983063 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -161,7 +161,7 @@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflate.o
161obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o 161obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
162obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o 162obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
163obj-$(CONFIG_PPPOE) += pppox.o pppoe.o 163obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
164obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o 164obj-$(CONFIG_PPPOL2TP) += pppox.o
165 165
166obj-$(CONFIG_SLIP) += slip.o 166obj-$(CONFIG_SLIP) += slip.o
167obj-$(CONFIG_SLHC) += slhc.o 167obj-$(CONFIG_SLHC) += slhc.o
@@ -291,5 +291,6 @@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.o
291obj-$(CONFIG_SFC) += sfc/ 291obj-$(CONFIG_SFC) += sfc/
292 292
293obj-$(CONFIG_WIMAX) += wimax/ 293obj-$(CONFIG_WIMAX) += wimax/
294obj-$(CONFIG_CAIF) += caif/
294 295
295obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/ 296obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index ed5e9742be2c..ce0a0b8787d8 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -602,7 +602,7 @@ static void lance_load_multicast (struct net_device *dev)
602 struct lance_private *lp = netdev_priv(dev); 602 struct lance_private *lp = netdev_priv(dev);
603 volatile struct lance_init_block *ib = lp->init_block; 603 volatile struct lance_init_block *ib = lp->init_block;
604 volatile u16 *mcast_table = (u16 *)&ib->filter; 604 volatile u16 *mcast_table = (u16 *)&ib->filter;
605 struct dev_mc_list *dmi; 605 struct netdev_hw_addr *ha;
606 char *addrs; 606 char *addrs;
607 u32 crc; 607 u32 crc;
608 608
@@ -617,8 +617,8 @@ static void lance_load_multicast (struct net_device *dev)
617 ib->filter [1] = 0; 617 ib->filter [1] = 0;
618 618
619 /* Add addresses */ 619 /* Add addresses */
620 netdev_for_each_mc_addr(dmi, dev) { 620 netdev_for_each_mc_addr(ha, dev) {
621 addrs = dmi->dmi_addr; 621 addrs = ha->addr;
622 622
623 /* multicast address? */ 623 /* multicast address? */
624 if (!(*addrs & 1)) 624 if (!(*addrs & 1))
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 97a3dfd94dfa..1328eb9b841d 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -661,7 +661,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
661 dma_addr_t mapping; 661 dma_addr_t mapping;
662 662
663 ringp = &ap->skb->rx_std_skbuff[i]; 663 ringp = &ap->skb->rx_std_skbuff[i];
664 mapping = pci_unmap_addr(ringp, mapping); 664 mapping = dma_unmap_addr(ringp, mapping);
665 pci_unmap_page(ap->pdev, mapping, 665 pci_unmap_page(ap->pdev, mapping,
666 ACE_STD_BUFSIZE, 666 ACE_STD_BUFSIZE,
667 PCI_DMA_FROMDEVICE); 667 PCI_DMA_FROMDEVICE);
@@ -681,7 +681,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
681 dma_addr_t mapping; 681 dma_addr_t mapping;
682 682
683 ringp = &ap->skb->rx_mini_skbuff[i]; 683 ringp = &ap->skb->rx_mini_skbuff[i];
684 mapping = pci_unmap_addr(ringp,mapping); 684 mapping = dma_unmap_addr(ringp,mapping);
685 pci_unmap_page(ap->pdev, mapping, 685 pci_unmap_page(ap->pdev, mapping,
686 ACE_MINI_BUFSIZE, 686 ACE_MINI_BUFSIZE,
687 PCI_DMA_FROMDEVICE); 687 PCI_DMA_FROMDEVICE);
@@ -700,7 +700,7 @@ static void __devexit acenic_remove_one(struct pci_dev *pdev)
700 dma_addr_t mapping; 700 dma_addr_t mapping;
701 701
702 ringp = &ap->skb->rx_jumbo_skbuff[i]; 702 ringp = &ap->skb->rx_jumbo_skbuff[i];
703 mapping = pci_unmap_addr(ringp, mapping); 703 mapping = dma_unmap_addr(ringp, mapping);
704 pci_unmap_page(ap->pdev, mapping, 704 pci_unmap_page(ap->pdev, mapping,
705 ACE_JUMBO_BUFSIZE, 705 ACE_JUMBO_BUFSIZE,
706 PCI_DMA_FROMDEVICE); 706 PCI_DMA_FROMDEVICE);
@@ -1683,7 +1683,7 @@ static void ace_load_std_rx_ring(struct ace_private *ap, int nr_bufs)
1683 ACE_STD_BUFSIZE, 1683 ACE_STD_BUFSIZE,
1684 PCI_DMA_FROMDEVICE); 1684 PCI_DMA_FROMDEVICE);
1685 ap->skb->rx_std_skbuff[idx].skb = skb; 1685 ap->skb->rx_std_skbuff[idx].skb = skb;
1686 pci_unmap_addr_set(&ap->skb->rx_std_skbuff[idx], 1686 dma_unmap_addr_set(&ap->skb->rx_std_skbuff[idx],
1687 mapping, mapping); 1687 mapping, mapping);
1688 1688
1689 rd = &ap->rx_std_ring[idx]; 1689 rd = &ap->rx_std_ring[idx];
@@ -1744,7 +1744,7 @@ static void ace_load_mini_rx_ring(struct ace_private *ap, int nr_bufs)
1744 ACE_MINI_BUFSIZE, 1744 ACE_MINI_BUFSIZE,
1745 PCI_DMA_FROMDEVICE); 1745 PCI_DMA_FROMDEVICE);
1746 ap->skb->rx_mini_skbuff[idx].skb = skb; 1746 ap->skb->rx_mini_skbuff[idx].skb = skb;
1747 pci_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx], 1747 dma_unmap_addr_set(&ap->skb->rx_mini_skbuff[idx],
1748 mapping, mapping); 1748 mapping, mapping);
1749 1749
1750 rd = &ap->rx_mini_ring[idx]; 1750 rd = &ap->rx_mini_ring[idx];
@@ -1800,7 +1800,7 @@ static void ace_load_jumbo_rx_ring(struct ace_private *ap, int nr_bufs)
1800 ACE_JUMBO_BUFSIZE, 1800 ACE_JUMBO_BUFSIZE,
1801 PCI_DMA_FROMDEVICE); 1801 PCI_DMA_FROMDEVICE);
1802 ap->skb->rx_jumbo_skbuff[idx].skb = skb; 1802 ap->skb->rx_jumbo_skbuff[idx].skb = skb;
1803 pci_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx], 1803 dma_unmap_addr_set(&ap->skb->rx_jumbo_skbuff[idx],
1804 mapping, mapping); 1804 mapping, mapping);
1805 1805
1806 rd = &ap->rx_jumbo_ring[idx]; 1806 rd = &ap->rx_jumbo_ring[idx];
@@ -2013,7 +2013,7 @@ static void ace_rx_int(struct net_device *dev, u32 rxretprd, u32 rxretcsm)
2013 skb = rip->skb; 2013 skb = rip->skb;
2014 rip->skb = NULL; 2014 rip->skb = NULL;
2015 pci_unmap_page(ap->pdev, 2015 pci_unmap_page(ap->pdev,
2016 pci_unmap_addr(rip, mapping), 2016 dma_unmap_addr(rip, mapping),
2017 mapsize, 2017 mapsize,
2018 PCI_DMA_FROMDEVICE); 2018 PCI_DMA_FROMDEVICE);
2019 skb_put(skb, retdesc->size); 2019 skb_put(skb, retdesc->size);
@@ -2078,18 +2078,16 @@ static inline void ace_tx_int(struct net_device *dev,
2078 2078
2079 do { 2079 do {
2080 struct sk_buff *skb; 2080 struct sk_buff *skb;
2081 dma_addr_t mapping;
2082 struct tx_ring_info *info; 2081 struct tx_ring_info *info;
2083 2082
2084 info = ap->skb->tx_skbuff + idx; 2083 info = ap->skb->tx_skbuff + idx;
2085 skb = info->skb; 2084 skb = info->skb;
2086 mapping = pci_unmap_addr(info, mapping);
2087 2085
2088 if (mapping) { 2086 if (dma_unmap_len(info, maplen)) {
2089 pci_unmap_page(ap->pdev, mapping, 2087 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
2090 pci_unmap_len(info, maplen), 2088 dma_unmap_len(info, maplen),
2091 PCI_DMA_TODEVICE); 2089 PCI_DMA_TODEVICE);
2092 pci_unmap_addr_set(info, mapping, 0); 2090 dma_unmap_len_set(info, maplen, 0);
2093 } 2091 }
2094 2092
2095 if (skb) { 2093 if (skb) {
@@ -2377,14 +2375,12 @@ static int ace_close(struct net_device *dev)
2377 2375
2378 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) { 2376 for (i = 0; i < ACE_TX_RING_ENTRIES(ap); i++) {
2379 struct sk_buff *skb; 2377 struct sk_buff *skb;
2380 dma_addr_t mapping;
2381 struct tx_ring_info *info; 2378 struct tx_ring_info *info;
2382 2379
2383 info = ap->skb->tx_skbuff + i; 2380 info = ap->skb->tx_skbuff + i;
2384 skb = info->skb; 2381 skb = info->skb;
2385 mapping = pci_unmap_addr(info, mapping);
2386 2382
2387 if (mapping) { 2383 if (dma_unmap_len(info, maplen)) {
2388 if (ACE_IS_TIGON_I(ap)) { 2384 if (ACE_IS_TIGON_I(ap)) {
2389 /* NB: TIGON_1 is special, tx_ring is in io space */ 2385 /* NB: TIGON_1 is special, tx_ring is in io space */
2390 struct tx_desc __iomem *tx; 2386 struct tx_desc __iomem *tx;
@@ -2395,10 +2391,10 @@ static int ace_close(struct net_device *dev)
2395 } else 2391 } else
2396 memset(ap->tx_ring + i, 0, 2392 memset(ap->tx_ring + i, 0,
2397 sizeof(struct tx_desc)); 2393 sizeof(struct tx_desc));
2398 pci_unmap_page(ap->pdev, mapping, 2394 pci_unmap_page(ap->pdev, dma_unmap_addr(info, mapping),
2399 pci_unmap_len(info, maplen), 2395 dma_unmap_len(info, maplen),
2400 PCI_DMA_TODEVICE); 2396 PCI_DMA_TODEVICE);
2401 pci_unmap_addr_set(info, mapping, 0); 2397 dma_unmap_len_set(info, maplen, 0);
2402 } 2398 }
2403 if (skb) { 2399 if (skb) {
2404 dev_kfree_skb(skb); 2400 dev_kfree_skb(skb);
@@ -2433,8 +2429,8 @@ ace_map_tx_skb(struct ace_private *ap, struct sk_buff *skb,
2433 2429
2434 info = ap->skb->tx_skbuff + idx; 2430 info = ap->skb->tx_skbuff + idx;
2435 info->skb = tail; 2431 info->skb = tail;
2436 pci_unmap_addr_set(info, mapping, mapping); 2432 dma_unmap_addr_set(info, mapping, mapping);
2437 pci_unmap_len_set(info, maplen, skb->len); 2433 dma_unmap_len_set(info, maplen, skb->len);
2438 return mapping; 2434 return mapping;
2439} 2435}
2440 2436
@@ -2553,8 +2549,8 @@ restart:
2553 } else { 2549 } else {
2554 info->skb = NULL; 2550 info->skb = NULL;
2555 } 2551 }
2556 pci_unmap_addr_set(info, mapping, mapping); 2552 dma_unmap_addr_set(info, mapping, mapping);
2557 pci_unmap_len_set(info, maplen, frag->size); 2553 dma_unmap_len_set(info, maplen, frag->size);
2558 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag); 2554 ace_load_tx_bd(ap, desc, mapping, flagsize, vlan_tag);
2559 } 2555 }
2560 } 2556 }
diff --git a/drivers/net/acenic.h b/drivers/net/acenic.h
index 17079b927ffa..0681da7e8753 100644
--- a/drivers/net/acenic.h
+++ b/drivers/net/acenic.h
@@ -589,7 +589,7 @@ struct ace_info {
589 589
590struct ring_info { 590struct ring_info {
591 struct sk_buff *skb; 591 struct sk_buff *skb;
592 DECLARE_PCI_UNMAP_ADDR(mapping) 592 DEFINE_DMA_UNMAP_ADDR(mapping);
593}; 593};
594 594
595 595
@@ -600,8 +600,8 @@ struct ring_info {
600 */ 600 */
601struct tx_ring_info { 601struct tx_ring_info {
602 struct sk_buff *skb; 602 struct sk_buff *skb;
603 DECLARE_PCI_UNMAP_ADDR(mapping) 603 DEFINE_DMA_UNMAP_ADDR(mapping);
604 DECLARE_PCI_UNMAP_LEN(maplen) 604 DEFINE_DMA_UNMAP_LEN(maplen);
605}; 605};
606 606
607 607
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 8d58f0a8f42f..97d71a960602 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -1376,7 +1376,7 @@ list to the device.
1376*/ 1376*/
1377static void amd8111e_set_multicast_list(struct net_device *dev) 1377static void amd8111e_set_multicast_list(struct net_device *dev)
1378{ 1378{
1379 struct dev_mc_list *mc_ptr; 1379 struct netdev_hw_addr *ha;
1380 struct amd8111e_priv *lp = netdev_priv(dev); 1380 struct amd8111e_priv *lp = netdev_priv(dev);
1381 u32 mc_filter[2] ; 1381 u32 mc_filter[2] ;
1382 int bit_num; 1382 int bit_num;
@@ -1407,8 +1407,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
1407 /* load all the multicast addresses in the logic filter */ 1407 /* load all the multicast addresses in the logic filter */
1408 lp->options |= OPTION_MULTICAST_ENABLE; 1408 lp->options |= OPTION_MULTICAST_ENABLE;
1409 mc_filter[1] = mc_filter[0] = 0; 1409 mc_filter[1] = mc_filter[0] = 0;
1410 netdev_for_each_mc_addr(mc_ptr, dev) { 1410 netdev_for_each_mc_addr(ha, dev) {
1411 bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f; 1411 bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
1412 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31); 1412 mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
1413 } 1413 }
1414 amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF); 1414 amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index f1f58c5e27bf..a4b5b08276f8 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -383,12 +383,12 @@ static void am79c961_setmulticastlist (struct net_device *dev)
383 } else if (dev->flags & IFF_ALLMULTI) { 383 } else if (dev->flags & IFF_ALLMULTI) {
384 memset(multi_hash, 0xff, sizeof(multi_hash)); 384 memset(multi_hash, 0xff, sizeof(multi_hash));
385 } else { 385 } else {
386 struct dev_mc_list *dmi; 386 struct netdev_hw_addr *ha;
387 387
388 memset(multi_hash, 0x00, sizeof(multi_hash)); 388 memset(multi_hash, 0x00, sizeof(multi_hash));
389 389
390 netdev_for_each_mc_addr(dmi, dev) 390 netdev_for_each_mc_addr(ha, dev)
391 am79c961_mc_hash(dmi->dmi_addr, multi_hash); 391 am79c961_mc_hash(ha->addr, multi_hash);
392 } 392 }
393 393
394 spin_lock_irqsave(&priv->chip_lock, flags); 394 spin_lock_irqsave(&priv->chip_lock, flags);
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index aed5b5479b50..0adab30f626b 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -557,14 +557,14 @@ static int hash_get_index(__u8 *addr)
557 */ 557 */
558static void at91ether_sethashtable(struct net_device *dev) 558static void at91ether_sethashtable(struct net_device *dev)
559{ 559{
560 struct dev_mc_list *curr; 560 struct netdev_hw_addr *ha;
561 unsigned long mc_filter[2]; 561 unsigned long mc_filter[2];
562 unsigned int bitnr; 562 unsigned int bitnr;
563 563
564 mc_filter[0] = mc_filter[1] = 0; 564 mc_filter[0] = mc_filter[1] = 0;
565 565
566 netdev_for_each_mc_addr(curr, dev) { 566 netdev_for_each_mc_addr(ha, dev) {
567 bitnr = hash_get_index(curr->dmi_addr); 567 bitnr = hash_get_index(ha->addr);
568 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 568 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
569 } 569 }
570 570
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 6be8b098b8b4..7800d7dfd299 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -736,7 +736,7 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
736static void eth_set_mcast_list(struct net_device *dev) 736static void eth_set_mcast_list(struct net_device *dev)
737{ 737{
738 struct port *port = netdev_priv(dev); 738 struct port *port = netdev_priv(dev);
739 struct dev_mc_list *mclist; 739 struct netdev_hw_addr *ha;
740 u8 diffs[ETH_ALEN], *addr; 740 u8 diffs[ETH_ALEN], *addr;
741 int i; 741 int i;
742 742
@@ -749,11 +749,11 @@ static void eth_set_mcast_list(struct net_device *dev)
749 memset(diffs, 0, ETH_ALEN); 749 memset(diffs, 0, ETH_ALEN);
750 750
751 addr = NULL; 751 addr = NULL;
752 netdev_for_each_mc_addr(mclist, dev) { 752 netdev_for_each_mc_addr(ha, dev) {
753 if (!addr) 753 if (!addr)
754 addr = mclist->dmi_addr; /* first MAC address */ 754 addr = ha->addr; /* first MAC address */
755 for (i = 0; i < ETH_ALEN; i++) 755 for (i = 0; i < ETH_ALEN; i++)
756 diffs[i] |= addr[i] ^ mclist->dmi_addr[i]; 756 diffs[i] |= addr[i] ^ ha->addr[i];
757 } 757 }
758 758
759 for (i = 0; i < ETH_ALEN; i++) { 759 for (i = 0; i < ETH_ALEN; i++) {
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 84f8a8f73802..7413a87e40ff 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -332,16 +332,16 @@ ks8695_init_partial_multicast(struct ks8695_priv *ksp,
 {
 	u32 low, high;
 	int i;
-	struct dev_mc_list *dmi;
+	struct netdev_hw_addr *ha;
 
 	i = 0;
-	netdev_for_each_mc_addr(dmi, ndev) {
+	netdev_for_each_mc_addr(ha, ndev) {
 		/* Ran out of space in chip? */
 		BUG_ON(i == KS8695_NR_ADDRESSES);
 
-		low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) |
-		      (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]);
-		high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]);
+		low = (ha->addr[2] << 24) | (ha->addr[3] << 16) |
+		      (ha->addr[4] << 8) | (ha->addr[5]);
+		high = (ha->addr[0] << 8) | (ha->addr[1]);
 
 		ks8695_writereg(ksp, KS8695_AAL_(i), low);
 		ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index f7c9ca1dfb17..2491934b73e8 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -744,7 +744,6 @@ static void netdev_rx(struct net_device *dev)
 		return;
 	}
 
-	skb->dev = dev;
 	skb_reserve(skb, 2);
 	skb_put(skb, length);
 	skb_copy_to_linear_data(skb, data, length);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 10a20fb9ae65..332f9806b78e 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -847,12 +847,12 @@ set_rx_mode(struct net_device *dev)
 		memset(mc_filter, 0x00, sizeof(mc_filter));
 		outb(1, ioaddr + RX_MODE);	/* Ignore almost all multicasts. */
 	} else {
-		struct dev_mc_list *mclist;
+		struct netdev_hw_addr *ha;
 
 		memset(mc_filter, 0, sizeof(mc_filter));
-		netdev_for_each_mc_addr(mclist, dev) {
+		netdev_for_each_mc_addr(ha, dev) {
 			unsigned int bit =
-				ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+				ether_crc_le(ETH_ALEN, ha->addr) >> 26;
 			mc_filter[bit >> 3] |= (1 << bit);
 		}
 		outb(0x02, ioaddr + RX_MODE); /* Use normal mode. */
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 50dc531a02d8..3d7051135c3a 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -354,7 +354,7 @@ static void atl1c_set_multi(struct net_device *netdev)
 {
 	struct atl1c_adapter *adapter = netdev_priv(netdev);
 	struct atl1c_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	u32 mac_ctrl_data;
 	u32 hash_value;
 
@@ -377,8 +377,8 @@ static void atl1c_set_multi(struct net_device *netdev)
 	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
 
 	/* comoute mc addresses' hash value ,and put it into hash table */
-	netdev_for_each_mc_addr(mc_ptr, netdev) {
-		hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr);
+	netdev_for_each_mc_addr(ha, netdev) {
+		hash_value = atl1c_hash_mc_addr(hw, ha->addr);
 		atl1c_hash_set(hw, hash_value);
 	}
 }
@@ -1817,7 +1817,6 @@ rrs_checked:
 		atl1c_clean_rfd(rfd_ring, rrs, rfd_num);
 		skb_put(skb, length - ETH_FCS_LEN);
 		skb->protocol = eth_type_trans(skb, netdev);
-		skb->dev = netdev;
 		atl1c_rx_checksum(adapter, skb, rrs);
 		if (unlikely(adapter->vlgrp) && rrs->word3 & RRS_VLAN_INS) {
 			u16 vlan;
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 73302ae468aa..7dd33776de00 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -284,7 +284,7 @@ static void atl1e_set_multi(struct net_device *netdev)
 {
 	struct atl1e_adapter *adapter = netdev_priv(netdev);
 	struct atl1e_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	u32 mac_ctrl_data = 0;
 	u32 hash_value;
 
@@ -307,8 +307,8 @@ static void atl1e_set_multi(struct net_device *netdev)
 	AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
 
 	/* comoute mc addresses' hash value ,and put it into hash table */
-	netdev_for_each_mc_addr(mc_ptr, netdev) {
-		hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr);
+	netdev_for_each_mc_addr(ha, netdev) {
+		hash_value = atl1e_hash_mc_addr(hw, ha->addr);
 		atl1e_hash_set(hw, hash_value);
 	}
 }
@@ -1428,7 +1428,6 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
 				 "Memory squeeze, deferring packet\n");
 			goto skip_pkt;
 		}
-		skb->dev = netdev;
 		memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
 		skb_put(skb, packet_size);
 		skb->protocol = eth_type_trans(skb, netdev);
@@ -1680,7 +1679,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
 {
 	struct atl1e_tpd_desc *use_tpd = NULL;
 	struct atl1e_tx_buffer *tx_buffer = NULL;
-	u16 buf_len = skb->len - skb->data_len;
+	u16 buf_len = skb_headlen(skb);
 	u16 map_len = 0;
 	u16 mapped_len = 0;
 	u16 hdr_len = 0;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 0ebd8208f606..33448a09b47f 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2347,7 +2347,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 {
 	struct atl1_adapter *adapter = netdev_priv(netdev);
 	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
-	int len = skb->len;
+	int len;
 	int tso;
 	int count = 1;
 	int ret_val;
@@ -2359,7 +2359,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 	unsigned int f;
 	unsigned int proto_hdr_len;
 
-	len -= skb->data_len;
+	len = skb_headlen(skb);
 
 	if (unlikely(skb->len <= 0)) {
 		dev_kfree_skb_any(skb);
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 54662f24f9bb..fee9cf6a5bd4 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -136,7 +136,7 @@ static void atl2_set_multi(struct net_device *netdev)
 {
 	struct atl2_adapter *adapter = netdev_priv(netdev);
 	struct atl2_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	u32 rctl;
 	u32 hash_value;
 
@@ -158,8 +158,8 @@ static void atl2_set_multi(struct net_device *netdev)
 	ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
 
 	/* comoute mc addresses' hash value ,and put it into hash table */
-	netdev_for_each_mc_addr(mc_ptr, netdev) {
-		hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
+	netdev_for_each_mc_addr(ha, netdev) {
+		hash_value = atl2_hash_mc_addr(hw, ha->addr);
 		atl2_hash_set(hw, hash_value);
 	}
 }
@@ -422,7 +422,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
 			netdev->stats.rx_dropped++;
 			break;
 		}
-		skb->dev = netdev;
 		memcpy(skb->data, rxd->packet, rx_size);
 		skb_put(skb, rx_size);
 		skb->protocol = eth_type_trans(skb, netdev);
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 72f3306352e2..f979ea2d6d3c 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -123,7 +123,7 @@ static void atlx_set_multi(struct net_device *netdev)
 {
 	struct atlx_adapter *adapter = netdev_priv(netdev);
 	struct atlx_hw *hw = &adapter->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	u32 rctl;
 	u32 hash_value;
 
@@ -144,8 +144,8 @@ static void atlx_set_multi(struct net_device *netdev)
 	iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
 
 	/* compute mc addresses' hash value ,and put it into hash table */
-	netdev_for_each_mc_addr(mc_ptr, netdev) {
-		hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
+	netdev_for_each_mc_addr(ha, netdev) {
+		hash_value = atlx_hash_mc_addr(hw, ha->addr);
 		atlx_hash_set(hw, hash_value);
 	}
 }
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 55039d44dc47..2bd1a5c0ec17 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -882,11 +882,11 @@ static void set_rx_mode_8012(struct net_device *dev)
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		new_mode = CMR2h_Normal;
 	} else {
-		struct dev_mc_list *mclist;
+		struct netdev_hw_addr *ha;
 
 		memset(mc_filter, 0, sizeof(mc_filter));
-		netdev_for_each_mc_addr(mclist, dev) {
-			int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+		netdev_for_each_mc_addr(ha, dev) {
+			int filterbit = ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
 			mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
 		}
 		new_mode = CMR2h_Normal;
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 4da191b87b0d..7abb2c84ba5d 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -75,14 +75,19 @@ static int au1000_debug = 5;
 static int au1000_debug = 3;
 #endif
 
+#define AU1000_DEF_MSG_ENABLE	(NETIF_MSG_DRV	| \
+				NETIF_MSG_PROBE	| \
+				NETIF_MSG_LINK)
+
 #define DRV_NAME "au1000_eth"
-#define DRV_VERSION "1.6"
+#define DRV_VERSION "1.7"
 #define DRV_AUTHOR "Pete Popov <ppopov@embeddedalley.com>"
 #define DRV_DESC "Au1xxx on-chip Ethernet driver"
 
 MODULE_AUTHOR(DRV_AUTHOR);
 MODULE_DESCRIPTION(DRV_DESC);
 MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
 
 /*
  * Theory of operation
@@ -148,7 +153,7 @@ struct au1000_private *au_macs[NUM_ETH_INTERFACES];
  * specific irq-map
  */
 
-static void enable_mac(struct net_device *dev, int force_reset)
+static void au1000_enable_mac(struct net_device *dev, int force_reset)
 {
 	unsigned long flags;
 	struct au1000_private *aup = netdev_priv(dev);
@@ -182,8 +187,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
 	while (*mii_control_reg & MAC_MII_BUSY) {
 		mdelay(1);
 		if (--timedout == 0) {
-			printk(KERN_ERR "%s: read_MII busy timeout!!\n",
-					dev->name);
+			netdev_err(dev, "read_MII busy timeout!!\n");
 			return -1;
 		}
 	}
@@ -197,8 +201,7 @@ static int au1000_mdio_read(struct net_device *dev, int phy_addr, int reg)
 	while (*mii_control_reg & MAC_MII_BUSY) {
 		mdelay(1);
 		if (--timedout == 0) {
-			printk(KERN_ERR "%s: mdio_read busy timeout!!\n",
-					dev->name);
+			netdev_err(dev, "mdio_read busy timeout!!\n");
 			return -1;
 		}
 	}
@@ -217,8 +220,7 @@ static void au1000_mdio_write(struct net_device *dev, int phy_addr,
 	while (*mii_control_reg & MAC_MII_BUSY) {
 		mdelay(1);
 		if (--timedout == 0) {
-			printk(KERN_ERR "%s: mdio_write busy timeout!!\n",
-					dev->name);
+			netdev_err(dev, "mdio_write busy timeout!!\n");
 			return;
 		}
 	}
@@ -236,7 +238,7 @@ static int au1000_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
 	 * _NOT_ hold (e.g. when PHY is accessed through other MAC's MII bus) */
 	struct net_device *const dev = bus->priv;
 
-	enable_mac(dev, 0); /* make sure the MAC associated with this
+	au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
 			     * mii_bus is enabled */
 	return au1000_mdio_read(dev, phy_addr, regnum);
 }
@@ -246,7 +248,7 @@ static int au1000_mdiobus_write(struct mii_bus *bus, int phy_addr, int regnum,
 {
 	struct net_device *const dev = bus->priv;
 
-	enable_mac(dev, 0); /* make sure the MAC associated with this
+	au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
 			     * mii_bus is enabled */
 	au1000_mdio_write(dev, phy_addr, regnum, value);
 	return 0;
@@ -256,28 +258,26 @@ static int au1000_mdiobus_reset(struct mii_bus *bus)
 {
 	struct net_device *const dev = bus->priv;
 
-	enable_mac(dev, 0); /* make sure the MAC associated with this
+	au1000_enable_mac(dev, 0); /* make sure the MAC associated with this
 			     * mii_bus is enabled */
 	return 0;
 }
 
-static void hard_stop(struct net_device *dev)
+static void au1000_hard_stop(struct net_device *dev)
 {
 	struct au1000_private *aup = netdev_priv(dev);
 
-	if (au1000_debug > 4)
-		printk(KERN_INFO "%s: hard stop\n", dev->name);
+	netif_dbg(aup, drv, dev, "hard stop\n");
 
 	aup->mac->control &= ~(MAC_RX_ENABLE | MAC_TX_ENABLE);
 	au_sync_delay(10);
 }
 
-static void enable_rx_tx(struct net_device *dev)
+static void au1000_enable_rx_tx(struct net_device *dev)
 {
 	struct au1000_private *aup = netdev_priv(dev);
 
-	if (au1000_debug > 4)
-		printk(KERN_INFO "%s: enable_rx_tx\n", dev->name);
+	netif_dbg(aup, hw, dev, "enable_rx_tx\n");
 
 	aup->mac->control |= (MAC_RX_ENABLE | MAC_TX_ENABLE);
 	au_sync_delay(10);
@@ -297,16 +297,15 @@ au1000_adjust_link(struct net_device *dev)
 	spin_lock_irqsave(&aup->lock, flags);
 
 	if (phydev->link && (aup->old_speed != phydev->speed)) {
-		// speed changed
+		/* speed changed */
 
-		switch(phydev->speed) {
+		switch (phydev->speed) {
 		case SPEED_10:
 		case SPEED_100:
 			break;
 		default:
-			printk(KERN_WARNING
-			       "%s: Speed (%d) is not 10/100 ???\n",
-			       dev->name, phydev->speed);
+			netdev_warn(dev, "Speed (%d) is not 10/100 ???\n",
+							phydev->speed);
 			break;
 		}
 
@@ -316,10 +315,10 @@ au1000_adjust_link(struct net_device *dev)
 	}
 
 	if (phydev->link && (aup->old_duplex != phydev->duplex)) {
-		// duplex mode changed
+		/* duplex mode changed */
 
 		/* switching duplex mode requires to disable rx and tx! */
-		hard_stop(dev);
+		au1000_hard_stop(dev);
 
 		if (DUPLEX_FULL == phydev->duplex)
 			aup->mac->control = ((aup->mac->control
@@ -331,14 +330,14 @@ au1000_adjust_link(struct net_device *dev)
 				| MAC_DISABLE_RX_OWN);
 		au_sync_delay(1);
 
-		enable_rx_tx(dev);
+		au1000_enable_rx_tx(dev);
 		aup->old_duplex = phydev->duplex;
 
 		status_change = 1;
 	}
 
-	if(phydev->link != aup->old_link) {
-		// link state changed
+	if (phydev->link != aup->old_link) {
+		/* link state changed */
 
 		if (!phydev->link) {
 			/* link went down */
@@ -354,15 +353,15 @@ au1000_adjust_link(struct net_device *dev)
 
 	if (status_change) {
 		if (phydev->link)
-			printk(KERN_INFO "%s: link up (%d/%s)\n",
-			       dev->name, phydev->speed,
+			netdev_info(dev, "link up (%d/%s)\n",
+				phydev->speed,
 			       DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
 		else
-			printk(KERN_INFO "%s: link down\n", dev->name);
+			netdev_info(dev, "link down\n");
 	}
 }
 
-static int mii_probe (struct net_device *dev)
+static int au1000_mii_probe (struct net_device *dev)
 {
 	struct au1000_private *const aup = netdev_priv(dev);
 	struct phy_device *phydev = NULL;
@@ -373,8 +372,7 @@ static int mii_probe (struct net_device *dev)
 		if (aup->phy_addr)
 			phydev = aup->mii_bus->phy_map[aup->phy_addr];
 		else
-			printk (KERN_INFO DRV_NAME ":%s: using PHY-less setup\n",
-				dev->name);
+			netdev_info(dev, "using PHY-less setup\n");
 		return 0;
 	} else {
 		int phy_addr;
@@ -391,7 +389,7 @@ static int mii_probe (struct net_device *dev)
 		/* try harder to find a PHY */
 		if (!phydev && (aup->mac_id == 1)) {
 			/* no PHY found, maybe we have a dual PHY? */
-			printk (KERN_INFO DRV_NAME ": no PHY found on MAC1, "
+			dev_info(&dev->dev, ": no PHY found on MAC1, "
 				"let's see if it's attached to MAC0...\n");
 
 			/* find the first (lowest address) non-attached PHY on
@@ -417,7 +415,7 @@ static int mii_probe (struct net_device *dev)
 	}
 
 	if (!phydev) {
-		printk (KERN_ERR DRV_NAME ":%s: no PHY found\n", dev->name);
+		netdev_err(dev, "no PHY found\n");
 		return -1;
 	}
 
@@ -428,7 +426,7 @@ static int mii_probe (struct net_device *dev)
 			0, PHY_INTERFACE_MODE_MII);
 
 	if (IS_ERR(phydev)) {
-		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		netdev_err(dev, "Could not attach to PHY\n");
 		return PTR_ERR(phydev);
 	}
 
@@ -449,8 +447,8 @@ static int mii_probe (struct net_device *dev)
 	aup->old_duplex = -1;
 	aup->phy_dev = phydev;
 
-	printk(KERN_INFO "%s: attached PHY driver [%s] "
-	       "(mii_bus:phy_addr=%s, irq=%d)\n", dev->name,
+	netdev_info(dev, "attached PHY driver [%s] "
+	       "(mii_bus:phy_addr=%s, irq=%d)\n",
 	       phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
 
 	return 0;
@@ -462,7 +460,7 @@ static int mii_probe (struct net_device *dev)
  * has the virtual and dma address of a buffer suitable for
  * both, receive and transmit operations.
  */
-static db_dest_t *GetFreeDB(struct au1000_private *aup)
+static db_dest_t *au1000_GetFreeDB(struct au1000_private *aup)
 {
 	db_dest_t *pDB;
 	pDB = aup->pDBfree;
@@ -473,7 +471,7 @@ static db_dest_t *GetFreeDB(struct au1000_private *aup)
 	return pDB;
 }
 
-void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
+void au1000_ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
 {
 	db_dest_t *pDBfree = aup->pDBfree;
 	if (pDBfree)
@@ -481,12 +479,12 @@ void ReleaseDB(struct au1000_private *aup, db_dest_t *pDB)
 	aup->pDBfree = pDB;
 }
 
-static void reset_mac_unlocked(struct net_device *dev)
+static void au1000_reset_mac_unlocked(struct net_device *dev)
 {
 	struct au1000_private *const aup = netdev_priv(dev);
 	int i;
 
-	hard_stop(dev);
+	au1000_hard_stop(dev);
 
 	*aup->enable = MAC_EN_CLOCK_ENABLE;
 	au_sync_delay(2);
@@ -507,18 +505,17 @@ static void reset_mac_unlocked(struct net_device *dev)
 
 }
 
-static void reset_mac(struct net_device *dev)
+static void au1000_reset_mac(struct net_device *dev)
 {
 	struct au1000_private *const aup = netdev_priv(dev);
 	unsigned long flags;
 
-	if (au1000_debug > 4)
-		printk(KERN_INFO "%s: reset mac, aup %x\n",
-				dev->name, (unsigned)aup);
+	netif_dbg(aup, hw, dev, "reset mac, aup %x\n",
+					(unsigned)aup);
 
 	spin_lock_irqsave(&aup->lock, flags);
 
-	reset_mac_unlocked (dev);
+	au1000_reset_mac_unlocked (dev);
 
 	spin_unlock_irqrestore(&aup->lock, flags);
 }
@@ -529,7 +526,7 @@ static void reset_mac(struct net_device *dev)
  * these are not descriptors sitting in memory.
 */
 static void
-setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
+au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base)
 {
 	int i;
 
@@ -582,11 +579,25 @@ au1000_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
 	info->regdump_len = 0;
 }
 
+static void au1000_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct au1000_private *aup = netdev_priv(dev);
+	aup->msg_enable = value;
+}
+
+static u32 au1000_get_msglevel(struct net_device *dev)
+{
+	struct au1000_private *aup = netdev_priv(dev);
+	return aup->msg_enable;
+}
+
 static const struct ethtool_ops au1000_ethtool_ops = {
 	.get_settings = au1000_get_settings,
 	.set_settings = au1000_set_settings,
 	.get_drvinfo = au1000_get_drvinfo,
 	.get_link = ethtool_op_get_link,
+	.get_msglevel = au1000_get_msglevel,
+	.set_msglevel = au1000_set_msglevel,
 };
 
 
@@ -606,11 +617,10 @@ static int au1000_init(struct net_device *dev)
 	int i;
 	u32 control;
 
-	if (au1000_debug > 4)
-		printk("%s: au1000_init\n", dev->name);
+	netif_dbg(aup, hw, dev, "au1000_init\n");
 
 	/* bring the device out of reset */
-	enable_mac(dev, 1);
+	au1000_enable_mac(dev, 1);
 
 	spin_lock_irqsave(&aup->lock, flags);
 
@@ -649,7 +659,7 @@ static int au1000_init(struct net_device *dev)
 	return 0;
 }
 
-static inline void update_rx_stats(struct net_device *dev, u32 status)
+static inline void au1000_update_rx_stats(struct net_device *dev, u32 status)
 {
 	struct net_device_stats *ps = &dev->stats;
 
@@ -667,8 +677,7 @@ static inline void update_rx_stats(struct net_device *dev, u32 status)
 		ps->rx_crc_errors++;
 		if (status & RX_COLL)
 			ps->collisions++;
-	}
-	else
+	} else
 		ps->rx_bytes += status & RX_FRAME_LEN_MASK;
 
 }
@@ -685,15 +694,14 @@ static int au1000_rx(struct net_device *dev)
 	db_dest_t *pDB;
 	u32 frmlen;
 
-	if (au1000_debug > 5)
-		printk("%s: au1000_rx head %d\n", dev->name, aup->rx_head);
+	netif_dbg(aup, rx_status, dev, "au1000_rx head %d\n", aup->rx_head);
 
 	prxd = aup->rx_dma_ring[aup->rx_head];
 	buff_stat = prxd->buff_stat;
 	while (buff_stat & RX_T_DONE) {
 		status = prxd->status;
 		pDB = aup->rx_db_inuse[aup->rx_head];
-		update_rx_stats(dev, status);
+		au1000_update_rx_stats(dev, status);
 		if (!(status & RX_ERROR)) {
 
 			/* good frame */
@@ -701,9 +709,7 @@ static int au1000_rx(struct net_device *dev)
 			frmlen -= 4; /* Remove FCS */
 			skb = dev_alloc_skb(frmlen + 2);
 			if (skb == NULL) {
-				printk(KERN_ERR
-				       "%s: Memory squeeze, dropping packet.\n",
-				       dev->name);
+				netdev_err(dev, "Memory squeeze, dropping packet.\n");
 				dev->stats.rx_dropped++;
 				continue;
 			}
@@ -713,8 +719,7 @@ static int au1000_rx(struct net_device *dev)
 			skb_put(skb, frmlen);
 			skb->protocol = eth_type_trans(skb, dev);
 			netif_rx(skb);	/* pass the packet to upper layers */
-		}
-		else {
+		} else {
 			if (au1000_debug > 4) {
 				if (status & RX_MISSED_FRAME)
 					printk("rx miss\n");
@@ -747,7 +752,7 @@ static int au1000_rx(struct net_device *dev)
 	return 0;
 }
 
-static void update_tx_stats(struct net_device *dev, u32 status)
+static void au1000_update_tx_stats(struct net_device *dev, u32 status)
 {
 	struct au1000_private *aup = netdev_priv(dev);
 	struct net_device_stats *ps = &dev->stats;
@@ -760,8 +765,7 @@ static void update_tx_stats(struct net_device *dev, u32 status)
 			ps->tx_errors++;
 			ps->tx_aborted_errors++;
 		}
-	}
-	else {
+	} else {
 		ps->tx_errors++;
 		ps->tx_aborted_errors++;
 		if (status & (TX_NO_CARRIER | TX_LOSS_CARRIER))
@@ -783,7 +787,7 @@ static void au1000_tx_ack(struct net_device *dev)
 	ptxd = aup->tx_dma_ring[aup->tx_tail];
 
 	while (ptxd->buff_stat & TX_T_DONE) {
-		update_tx_stats(dev, ptxd->status);
+		au1000_update_tx_stats(dev, ptxd->status);
 		ptxd->buff_stat &= ~TX_T_DONE;
 		ptxd->len = 0;
 		au_sync();
@@ -817,18 +821,18 @@ static int au1000_open(struct net_device *dev)
 	int retval;
 	struct au1000_private *aup = netdev_priv(dev);
 
-	if (au1000_debug > 4)
-		printk("%s: open: dev=%p\n", dev->name, dev);
+	netif_dbg(aup, drv, dev, "open: dev=%p\n", dev);
 
-	if ((retval = request_irq(dev->irq, au1000_interrupt, 0,
-					dev->name, dev))) {
-		printk(KERN_ERR "%s: unable to get IRQ %d\n",
-				dev->name, dev->irq);
+	retval = request_irq(dev->irq, au1000_interrupt, 0,
+					dev->name, dev);
+	if (retval) {
+		netdev_err(dev, "unable to get IRQ %d\n", dev->irq);
 		return retval;
 	}
 
-	if ((retval = au1000_init(dev))) {
-		printk(KERN_ERR "%s: error in au1000_init\n", dev->name);
+	retval = au1000_init(dev);
+	if (retval) {
+		netdev_err(dev, "error in au1000_init\n");
 		free_irq(dev->irq, dev);
 		return retval;
 	}
@@ -841,8 +845,7 @@ static int au1000_open(struct net_device *dev)
 
 	netif_start_queue(dev);
 
-	if (au1000_debug > 4)
-		printk("%s: open: Initialization done.\n", dev->name);
+	netif_dbg(aup, drv, dev, "open: Initialization done.\n");
 
 	return 0;
 }
@@ -852,15 +855,14 @@ static int au1000_close(struct net_device *dev)
 	unsigned long flags;
 	struct au1000_private *const aup = netdev_priv(dev);
 
-	if (au1000_debug > 4)
-		printk("%s: close: dev=%p\n", dev->name, dev);
+	netif_dbg(aup, drv, dev, "close: dev=%p\n", dev);
 
 	if (aup->phy_dev)
 		phy_stop(aup->phy_dev);
 
 	spin_lock_irqsave(&aup->lock, flags);
 
-	reset_mac_unlocked (dev);
+	au1000_reset_mac_unlocked (dev);
 
 	/* stop the device */
 	netif_stop_queue(dev);
@@ -884,9 +886,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
 	db_dest_t *pDB;
 	int i;
 
-	if (au1000_debug > 5)
-		printk("%s: tx: aup %x len=%d, data=%p, head %d\n",
-				dev->name, (unsigned)aup, skb->len,
+	netif_dbg(aup, tx_queued, dev, "tx: aup %x len=%d, data=%p, head %d\n",
+				(unsigned)aup, skb->len,
 				skb->data, aup->tx_head);
 
 	ptxd = aup->tx_dma_ring[aup->tx_head];
@@ -896,9 +897,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 		aup->tx_full = 1;
 		return NETDEV_TX_BUSY;
-	}
-	else if (buff_stat & TX_T_DONE) {
-		update_tx_stats(dev, ptxd->status);
+	} else if (buff_stat & TX_T_DONE) {
+		au1000_update_tx_stats(dev, ptxd->status);
 		ptxd->len = 0;
 	}
 
@@ -910,12 +910,11 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
 	pDB = aup->tx_db_inuse[aup->tx_head];
 	skb_copy_from_linear_data(skb, (void *)pDB->vaddr, skb->len);
 	if (skb->len < ETH_ZLEN) {
-		for (i=skb->len; i<ETH_ZLEN; i++) {
+		for (i = skb->len; i < ETH_ZLEN; i++) {
 			((char *)pDB->vaddr)[i] = 0;
 		}
 		ptxd->len = ETH_ZLEN;
-	}
-	else
+	} else
 		ptxd->len = skb->len;
 
 	ps->tx_packets++;
@@ -935,8 +934,8 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev)
  */
 static void au1000_tx_timeout(struct net_device *dev)
 {
-	printk(KERN_ERR "%s: au1000_tx_timeout: dev=%p\n", dev->name, dev);
-	reset_mac(dev);
+	netdev_err(dev, "au1000_tx_timeout: dev=%p\n", dev);
+	au1000_reset_mac(dev);
 	au1000_init(dev);
 	dev->trans_start = jiffies;
 	netif_wake_queue(dev);
@@ -946,8 +945,7 @@ static void au1000_multicast_list(struct net_device *dev)
 {
 	struct au1000_private *aup = netdev_priv(dev);
 
-	if (au1000_debug > 4)
-		printk("%s: au1000_multicast_list: flags=%x\n", dev->name, dev->flags);
+	netif_dbg(aup, drv, dev, "au1000_multicast_list: flags=%x\n", dev->flags);
 
 	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
 		aup->mac->control |= MAC_PROMISCUOUS;
@@ -955,14 +953,14 @@ static void au1000_multicast_list(struct net_device *dev)
 		   netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
 		aup->mac->control |= MAC_PASS_ALL_MULTI;
 		aup->mac->control &= ~MAC_PROMISCUOUS;
-		printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
+		netdev_info(dev, "Pass all multicast\n");
 	} else {
-		struct dev_mc_list *mclist;
+		struct netdev_hw_addr *ha;
 		u32 mc_filter[2];	/* Multicast hash filter */
 
 		mc_filter[1] = mc_filter[0] = 0;
-		netdev_for_each_mc_addr(mclist, dev)
-			set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
+		netdev_for_each_mc_addr(ha, dev)
+			set_bit(ether_crc(ETH_ALEN, ha->addr)>>26,
 				(long *)mc_filter);
 		aup->mac->multi_hash_high = mc_filter[1];
 		aup->mac->multi_hash_low = mc_filter[0];
@@ -975,9 +973,11 @@ static int au1000_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
 	struct au1000_private *aup = netdev_priv(dev);
 
-	if (!netif_running(dev)) return -EINVAL;
+	if (!netif_running(dev))
+		return -EINVAL;
 
-	if (!aup->phy_dev) return -EINVAL; // PHY not controllable
+	if (!aup->phy_dev)
+		return -EINVAL; /* PHY not controllable */
 
 	return phy_mii_ioctl(aup->phy_dev, if_mii(rq), cmd);
 }
@@ -996,7 +996,7 @@ static const struct net_device_ops au1000_netdev_ops = {
 
 static int __devinit au1000_probe(struct platform_device *pdev)
 {
-	static unsigned version_printed = 0;
+	static unsigned version_printed;
 	struct au1000_private *aup = NULL;
 	struct au1000_eth_platform_data *pd;
 	struct net_device *dev = NULL;
@@ -1007,40 +1007,40 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 
 	base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	if (!base) {
-		printk(KERN_ERR DRV_NAME ": failed to retrieve base register\n");
+		dev_err(&pdev->dev, "failed to retrieve base register\n");
 		err = -ENODEV;
 		goto out;
 	}
 
 	macen = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	if (!macen) {
-		printk(KERN_ERR DRV_NAME ": failed to retrieve MAC Enable register\n");
+		dev_err(&pdev->dev, "failed to retrieve MAC Enable register\n");
 		err = -ENODEV;
 		goto out;
 	}
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0) {
-		printk(KERN_ERR DRV_NAME ": failed to retrieve IRQ\n");
+		dev_err(&pdev->dev, "failed to retrieve IRQ\n");
 		err = -ENODEV;
 		goto out;
 	}
 
 	if (!request_mem_region(base->start, resource_size(base), pdev->name)) {
-		printk(KERN_ERR DRV_NAME ": failed to request memory region for base registers\n");
+		dev_err(&pdev->dev, "failed to request memory region for base registers\n");
 		err = -ENXIO;
 		goto out;
 	}
 
 	if (!request_mem_region(macen->start, resource_size(macen), pdev->name)) {
-		printk(KERN_ERR DRV_NAME ": failed to request memory region for MAC enable register\n");
+		dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n");
 		err = -ENXIO;
 		goto err_request;
 	}
 
 	dev = alloc_etherdev(sizeof(struct au1000_private));
 	if (!dev) {
-		printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
+		dev_err(&pdev->dev, "alloc_etherdev failed\n");
 		err = -ENOMEM;
 		goto err_alloc;
 	}
@@ -1050,6 +1050,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	aup = netdev_priv(dev);
 
 	spin_lock_init(&aup->lock);
+	aup->msg_enable = (au1000_debug < 4 ? AU1000_DEF_MSG_ENABLE : au1000_debug);
 
 	/* Allocate the data buffers */
 	/* Snooping works fine with eth on all au1xxx */
@@ -1057,7 +1058,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 			(NUM_TX_BUFFS + NUM_RX_BUFFS),
 			&aup->dma_addr,	0);
 	if (!aup->vaddr) {
-		printk(KERN_ERR DRV_NAME ": failed to allocate data buffers\n");
+		dev_err(&pdev->dev, "failed to allocate data buffers\n");
 		err = -ENOMEM;
 		goto err_vaddr;
 	}
@@ -1065,7 +1066,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	/* aup->mac is the base address of the MAC's registers */
 	aup->mac = (volatile mac_reg_t *)ioremap_nocache(base->start, resource_size(base));
 	if (!aup->mac) {
-		printk(KERN_ERR DRV_NAME ": failed to ioremap MAC registers\n");
+		dev_err(&pdev->dev, "failed to ioremap MAC registers\n");
 		err = -ENXIO;
 		goto err_remap1;
 	}
@@ -1073,7 +1074,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	/* Setup some variables for quick register address access */
 	aup->enable = (volatile u32 *)ioremap_nocache(macen->start, resource_size(macen));
 	if (!aup->enable) {
-		printk(KERN_ERR DRV_NAME ": failed to ioremap MAC enable register\n");
+		dev_err(&pdev->dev, "failed to ioremap MAC enable register\n");
 		err = -ENXIO;
 		goto err_remap2;
 	}
@@ -1083,14 +1084,13 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 		if (prom_get_ethernet_addr(ethaddr) == 0)
 			memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
 		else {
-			printk(KERN_INFO "%s: No MAC address found\n",
-					dev->name);
+			netdev_info(dev, "No MAC address found\n");
 			/* Use the hard coded MAC addresses */
 		}
 
-		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
+		au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
 	} else if (pdev->id == 1)
-		setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
+		au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
 
 	/*
 	 * Assign to the Ethernet ports two consecutive MAC addresses
@@ -1104,7 +1104,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 
 	pd = pdev->dev.platform_data;
 	if (!pd) {
-		printk(KERN_INFO DRV_NAME ": no platform_data passed, PHY search on MAC0\n");
+		dev_info(&pdev->dev, "no platform_data passed, PHY search on MAC0\n");
 		aup->phy1_search_mac0 = 1;
 	} else {
 		aup->phy_static_config = pd->phy_static_config;
@@ -1116,7 +1116,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	}
 
 	if (aup->phy_busid && aup->phy_busid > 0) {
-		printk(KERN_ERR DRV_NAME ": MAC0-associated PHY attached 2nd MACs MII"
+		dev_err(&pdev->dev, "MAC0-associated PHY attached 2nd MACs MII"
 			"bus not supported yet\n");
 		err = -ENODEV;
 		goto err_mdiobus_alloc;
@@ -1124,7 +1124,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 
 	aup->mii_bus = mdiobus_alloc();
 	if (aup->mii_bus == NULL) {
-		printk(KERN_ERR DRV_NAME ": failed to allocate mdiobus structure\n");
+		dev_err(&pdev->dev, "failed to allocate mdiobus structure\n");
 		err = -ENOMEM;
 		goto err_mdiobus_alloc;
 	}
@@ -1139,7 +1139,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	if (aup->mii_bus->irq == NULL)
 		goto err_out;
 
-	for(i = 0; i < PHY_MAX_ADDR; ++i)
+	for (i = 0; i < PHY_MAX_ADDR; ++i)
 		aup->mii_bus->irq[i] = PHY_POLL;
 	/* if known, set corresponding PHY IRQs */
 	if (aup->phy_static_config)
@@ -1148,11 +1148,11 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 
 	err = mdiobus_register(aup->mii_bus);
 	if (err) {
-		printk(KERN_ERR DRV_NAME " failed to register MDIO bus\n");
+		dev_err(&pdev->dev, "failed to register MDIO bus\n");
 		goto err_mdiobus_reg;
 	}
 
-	if (mii_probe(dev) != 0)
+	if (au1000_mii_probe(dev) != 0)
 		goto err_out;
 
 	pDBfree = NULL;
@@ -1168,7 +1168,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	aup->pDBfree = pDBfree;
 
 	for (i = 0; i < NUM_RX_DMA; i++) {
-		pDB = GetFreeDB(aup);
+		pDB = au1000_GetFreeDB(aup);
 		if (!pDB) {
 			goto err_out;
 		}
@@ -1176,7 +1176,7 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 		aup->rx_db_inuse[i] = pDB;
 	}
 	for (i = 0; i < NUM_TX_DMA; i++) {
-		pDB = GetFreeDB(aup);
+		pDB = au1000_GetFreeDB(aup);
 		if (!pDB) {
 			goto err_out;
 		}
@@ -1195,17 +1195,16 @@ static int __devinit au1000_probe(struct platform_device *pdev)
 	 * The boot code uses the ethernet controller, so reset it to start
 	 * fresh. au1000_init() expects that the device is in reset state.
 	 */
-	reset_mac(dev);
+	au1000_reset_mac(dev);
 
 	err = register_netdev(dev);
 	if (err) {
-		printk(KERN_ERR DRV_NAME "%s: Cannot register net device, aborting.\n",
-					dev->name);
+		netdev_err(dev, "Cannot register net device, aborting.\n");
 		goto err_out;
 	}
 
-	printk("%s: Au1xx0 Ethernet found at 0x%lx, irq %d\n",
-			dev->name, (unsigned long)base->start, irq);
+	netdev_info(dev, "Au1xx0 Ethernet found at 0x%lx, irq %d\n",
+			(unsigned long)base->start, irq);
 	if (version_printed++ == 0)
 		printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
 
@@ -1217,15 +1216,15 @@ err_out:
 
 	/* here we should have a valid dev plus aup-> register addresses
 	 * so we can reset the mac properly.*/
-	reset_mac(dev);
+	au1000_reset_mac(dev);
 
 	for (i = 0; i < NUM_RX_DMA; i++) {
 		if (aup->rx_db_inuse[i])
-			ReleaseDB(aup, aup->rx_db_inuse[i]);
+			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
 	}
 	for (i = 0; i < NUM_TX_DMA; i++) {
 		if (aup->tx_db_inuse[i])
-			ReleaseDB(aup, aup->tx_db_inuse[i]);
+			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
 	}
 err_mdiobus_reg:
 	mdiobus_free(aup->mii_bus);
@@ -1261,11 +1260,11 @@ static int __devexit au1000_remove(struct platform_device *pdev)
 
 	for (i = 0; i < NUM_RX_DMA; i++)
 		if (aup->rx_db_inuse[i])
-			ReleaseDB(aup, aup->rx_db_inuse[i]);
+			au1000_ReleaseDB(aup, aup->rx_db_inuse[i]);
 
 	for (i = 0; i < NUM_TX_DMA; i++)
 		if (aup->tx_db_inuse[i])
-			ReleaseDB(aup, aup->tx_db_inuse[i]);
+			au1000_ReleaseDB(aup, aup->tx_db_inuse[i]);
 
 	dma_free_noncoherent(NULL, MAX_BUF_SIZE *
 			(NUM_TX_BUFFS + NUM_RX_BUFFS),
diff --git a/drivers/net/au1000_eth.h b/drivers/net/au1000_eth.h
index f9d29a29b8fd..d06ec008fbf1 100644
--- a/drivers/net/au1000_eth.h
+++ b/drivers/net/au1000_eth.h
@@ -35,7 +35,7 @@
 #define NUM_TX_BUFFS 4
 #define MAX_BUF_SIZE 2048
 
-#define ETH_TX_TIMEOUT HZ/4
+#define ETH_TX_TIMEOUT (HZ/4)
 #define MAC_MIN_PKT_SIZE 64
 
 #define MULTICAST_FILTER_LIMIT 64
@@ -125,4 +125,6 @@ struct au1000_private {
 	dma_addr_t dma_addr;	/* dma address of rx/tx buffers */
 
 	spinlock_t lock;	/* Serialise access to device */
+
+	u32 msg_enable;
 };
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 69d9f3d368ae..458272196997 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -1681,15 +1681,15 @@ static struct net_device_stats *b44_get_stats(struct net_device *dev)
 
 static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
 {
-	struct dev_mc_list *mclist;
+	struct netdev_hw_addr *ha;
 	int i, num_ents;
 
 	num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
 	i = 0;
-	netdev_for_each_mc_addr(mclist, dev) {
+	netdev_for_each_mc_addr(ha, dev) {
 		if (i == num_ents)
 			break;
-		__b44_cam_write(bp, mclist->dmi_addr, i++ + 1);
+		__b44_cam_write(bp, ha->addr, i++ + 1);
 	}
 	return i+1;
 }
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 17460aba3bae..9a8bdea4a8ec 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -341,11 +341,9 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		}
 
 		skb_put(skb, len);
-		skb->dev = dev;
 		skb->protocol = eth_type_trans(skb, dev);
 		priv->stats.rx_packets++;
 		priv->stats.rx_bytes += len;
-		dev->last_rx = jiffies;
 		netif_receive_skb(skb);
 
 	} while (--budget > 0);
@@ -605,7 +603,7 @@ static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
 static void bcm_enet_set_multicast_list(struct net_device *dev)
 {
 	struct bcm_enet_priv *priv;
-	struct dev_mc_list *mc_list;
+	struct netdev_hw_addr *ha;
 	u32 val;
 	int i;
 
@@ -633,14 +631,14 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
 	}
 
 	i = 0;
-	netdev_for_each_mc_addr(mc_list, dev) {
+	netdev_for_each_mc_addr(ha, dev) {
 		u8 *dmi_addr;
 		u32 tmp;
 
 		if (i == 3)
 			break;
 		/* update perfect match registers */
-		dmi_addr = mc_list->dmi_addr;
+		dmi_addr = ha->addr;
 		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
 			(dmi_addr[4] << 8) | dmi_addr[5];
 		enet_writel(priv, tmp, ENET_PML_REG(i + 1));
@@ -960,7 +958,9 @@ static int bcm_enet_open(struct net_device *dev)
 	/* all set, enable mac and interrupts, start dma engine and
 	 * kick rx dma channel */
 	wmb();
-	enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
+	val = enet_readl(priv, ENET_CTL_REG);
+	val |= ENET_CTL_ENABLE_MASK;
+	enet_writel(priv, val, ENET_CTL_REG);
 	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
 	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
 			ENETDMA_CHANCFG_REG(priv->rx_chan));
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 56387b191c96..373c1a563474 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -84,6 +84,8 @@ static inline char *nic_name(struct pci_dev *pdev)
 
 #define FW_VER_LEN		32
 
+#define BE_MAX_VF		32
+
 struct be_dma_mem {
 	void *va;
 	dma_addr_t dma;
@@ -207,7 +209,7 @@ struct be_tx_obj {
 /* Struct to remember the pages posted for rx frags */
 struct be_rx_page_info {
 	struct page *page;
-	dma_addr_t bus;
+	DEFINE_DMA_UNMAP_ADDR(bus);
 	u16 page_offset;
 	bool last_page_user;
 };
@@ -281,8 +283,15 @@ struct be_adapter {
 	u8 port_type;
 	u8 transceiver;
 	u8 generation;		/* BladeEngine ASIC generation */
+
+	bool sriov_enabled;
+	u32 vf_if_handle[BE_MAX_VF];
+	u32 vf_pmac_id[BE_MAX_VF];
+	u8 base_eq_id;
 };
 
+#define be_physfn(adapter) (!adapter->pdev->is_virtfn)
+
 /* BladeEngine Generation numbers */
 #define BE_GEN2 2
 #define BE_GEN3 3
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index d0ef4ac987cd..e79bf8b9af3b 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -843,7 +843,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
843 * Uses mbox 843 * Uses mbox
844 */ 844 */
845int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 845int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
846 u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id) 846 u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
847 u32 domain)
847{ 848{
848 struct be_mcc_wrb *wrb; 849 struct be_mcc_wrb *wrb;
849 struct be_cmd_req_if_create *req; 850 struct be_cmd_req_if_create *req;
@@ -860,6 +861,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
860 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 861 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
861 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req)); 862 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
862 863
864 req->hdr.domain = domain;
863 req->capability_flags = cpu_to_le32(cap_flags); 865 req->capability_flags = cpu_to_le32(cap_flags);
864 req->enable_flags = cpu_to_le32(en_flags); 866 req->enable_flags = cpu_to_le32(en_flags);
865 req->pmac_invalid = pmac_invalid; 867 req->pmac_invalid = pmac_invalid;
@@ -1111,6 +1113,10 @@ int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
1111 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1113 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1112 OPCODE_ETH_PROMISCUOUS, sizeof(*req)); 1114 OPCODE_ETH_PROMISCUOUS, sizeof(*req));
1113 1115
 1116 /* In FW versions X.102.149/X.101.487 and later,
 1117 * only the port setting associated with the
 1118 * issuing PCI function will take effect
 1119 */
1114 if (port_num) 1120 if (port_num)
1115 req->port1_promiscuous = en; 1121 req->port1_promiscuous = en;
1116 else 1122 else
@@ -1157,13 +1163,13 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1157 req->interface_id = if_id; 1163 req->interface_id = if_id;
1158 if (netdev) { 1164 if (netdev) {
1159 int i; 1165 int i;
1160 struct dev_mc_list *mc; 1166 struct netdev_hw_addr *ha;
1161 1167
1162 req->num_mac = cpu_to_le16(netdev_mc_count(netdev)); 1168 req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
1163 1169
1164 i = 0; 1170 i = 0;
1165 netdev_for_each_mc_addr(mc, netdev) 1171 netdev_for_each_mc_addr(ha, netdev)
1166 memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN); 1172 memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
1167 } else { 1173 } else {
1168 req->promiscuous = 1; 1174 req->promiscuous = 1;
1169 } 1175 }
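
The be_cmd_multicast_set() hunk above is part of the tree-wide move from struct dev_mc_list (with its dmi_addr field) to the generic struct netdev_hw_addr list walked by netdev_for_each_mc_addr(). The new-style walk in isolation, as a sketch (the destination table is hypothetical):

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	/* Copy every multicast address on the device into a flat table. */
	static void copy_mc_list(struct net_device *dev, u8 (*table)[ETH_ALEN])
	{
		struct netdev_hw_addr *ha;
		int i = 0;

		netdev_for_each_mc_addr(ha, dev)
			memcpy(table[i++], ha->addr, ETH_ALEN);
	}
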
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index cce61f9a3714..763dc199e337 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -878,7 +878,7 @@ extern int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
878extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id); 878extern int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id);
879extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, 879extern int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags,
880 u32 en_flags, u8 *mac, bool pmac_invalid, 880 u32 en_flags, u8 *mac, bool pmac_invalid,
881 u32 *if_handle, u32 *pmac_id); 881 u32 *if_handle, u32 *pmac_id, u32 domain);
882extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle); 882extern int be_cmd_if_destroy(struct be_adapter *adapter, u32 if_handle);
883extern int be_cmd_eq_create(struct be_adapter *adapter, 883extern int be_cmd_eq_create(struct be_adapter *adapter,
884 struct be_queue_info *eq, int eq_delay); 884 struct be_queue_info *eq, int eq_delay);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 51e1065e7897..d488d52d710a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -496,7 +496,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
496 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, 496 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
497 &ddrdma_cmd.dma); 497 &ddrdma_cmd.dma);
498 if (!ddrdma_cmd.va) { 498 if (!ddrdma_cmd.va) {
499 dev_err(&adapter->pdev->dev, "Memory allocation failure \n"); 499 dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
500 return -ENOMEM; 500 return -ENOMEM;
501 } 501 }
502 502
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index 2d4a4b827637..063026de4957 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -99,6 +99,9 @@
99/* Number of entries posted */ 99/* Number of entries posted */
100#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */ 100#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
101 101
102/********** SRIOV VF PCICFG OFFSET ********/
103#define SRIOV_VF_PCICFG_OFFSET (4096)
104
102/* Flashrom related descriptors */ 105/* Flashrom related descriptors */
103#define IMAGE_TYPE_FIRMWARE 160 106#define IMAGE_TYPE_FIRMWARE 160
104#define IMAGE_TYPE_BOOTCODE 224 107#define IMAGE_TYPE_BOOTCODE 224
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index ec6ace802256..fa10f13242c3 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -26,8 +26,11 @@ MODULE_AUTHOR("ServerEngines Corporation");
26MODULE_LICENSE("GPL"); 26MODULE_LICENSE("GPL");
27 27
28static unsigned int rx_frag_size = 2048; 28static unsigned int rx_frag_size = 2048;
29static unsigned int num_vfs;
29module_param(rx_frag_size, uint, S_IRUGO); 30module_param(rx_frag_size, uint, S_IRUGO);
31module_param(num_vfs, uint, S_IRUGO);
30MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data."); 32MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
33MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
31 34
32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { 35static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
33 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 36 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -138,12 +141,19 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
138 if (!is_valid_ether_addr(addr->sa_data)) 141 if (!is_valid_ether_addr(addr->sa_data))
139 return -EADDRNOTAVAIL; 142 return -EADDRNOTAVAIL;
140 143
144 /* MAC addr configuration will be done in hardware for VFs
145 * by their corresponding PFs. Just copy to netdev addr here
146 */
147 if (!be_physfn(adapter))
148 goto netdev_addr;
149
141 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id); 150 status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
142 if (status) 151 if (status)
143 return status; 152 return status;
144 153
145 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data, 154 status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
146 adapter->if_handle, &adapter->pmac_id); 155 adapter->if_handle, &adapter->pmac_id);
156netdev_addr:
147 if (!status) 157 if (!status)
148 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 158 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
149 159
@@ -386,26 +396,48 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
386 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len); 396 AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
387} 397}
388 398
399static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
400 bool unmap_single)
401{
402 dma_addr_t dma;
403
404 be_dws_le_to_cpu(wrb, sizeof(*wrb));
405
406 dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
407 if (wrb->frag_len) {
408 if (unmap_single)
409 pci_unmap_single(pdev, dma, wrb->frag_len,
410 PCI_DMA_TODEVICE);
411 else
412 pci_unmap_page(pdev, dma, wrb->frag_len,
413 PCI_DMA_TODEVICE);
414 }
415}
389 416
390static int make_tx_wrbs(struct be_adapter *adapter, 417static int make_tx_wrbs(struct be_adapter *adapter,
391 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb) 418 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
392{ 419{
393 u64 busaddr; 420 dma_addr_t busaddr;
394 u32 i, copied = 0; 421 int i, copied = 0;
395 struct pci_dev *pdev = adapter->pdev; 422 struct pci_dev *pdev = adapter->pdev;
396 struct sk_buff *first_skb = skb; 423 struct sk_buff *first_skb = skb;
397 struct be_queue_info *txq = &adapter->tx_obj.q; 424 struct be_queue_info *txq = &adapter->tx_obj.q;
398 struct be_eth_wrb *wrb; 425 struct be_eth_wrb *wrb;
399 struct be_eth_hdr_wrb *hdr; 426 struct be_eth_hdr_wrb *hdr;
427 bool map_single = false;
428 u16 map_head;
400 429
401 hdr = queue_head_node(txq); 430 hdr = queue_head_node(txq);
402 atomic_add(wrb_cnt, &txq->used);
403 queue_head_inc(txq); 431 queue_head_inc(txq);
432 map_head = txq->head;
404 433
405 if (skb->len > skb->data_len) { 434 if (skb->len > skb->data_len) {
406 int len = skb->len - skb->data_len; 435 int len = skb_headlen(skb);
407 busaddr = pci_map_single(pdev, skb->data, len, 436 busaddr = pci_map_single(pdev, skb->data, len,
408 PCI_DMA_TODEVICE); 437 PCI_DMA_TODEVICE);
438 if (pci_dma_mapping_error(pdev, busaddr))
439 goto dma_err;
440 map_single = true;
409 wrb = queue_head_node(txq); 441 wrb = queue_head_node(txq);
410 wrb_fill(wrb, busaddr, len); 442 wrb_fill(wrb, busaddr, len);
411 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 443 be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -419,6 +451,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
419 busaddr = pci_map_page(pdev, frag->page, 451 busaddr = pci_map_page(pdev, frag->page,
420 frag->page_offset, 452 frag->page_offset,
421 frag->size, PCI_DMA_TODEVICE); 453 frag->size, PCI_DMA_TODEVICE);
454 if (pci_dma_mapping_error(pdev, busaddr))
455 goto dma_err;
422 wrb = queue_head_node(txq); 456 wrb = queue_head_node(txq);
423 wrb_fill(wrb, busaddr, frag->size); 457 wrb_fill(wrb, busaddr, frag->size);
424 be_dws_cpu_to_le(wrb, sizeof(*wrb)); 458 be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -438,6 +472,16 @@ static int make_tx_wrbs(struct be_adapter *adapter,
438 be_dws_cpu_to_le(hdr, sizeof(*hdr)); 472 be_dws_cpu_to_le(hdr, sizeof(*hdr));
439 473
440 return copied; 474 return copied;
475dma_err:
476 txq->head = map_head;
477 while (copied) {
478 wrb = queue_head_node(txq);
479 unmap_tx_frag(pdev, wrb, map_single);
480 map_single = false;
481 copied -= wrb->frag_len;
482 queue_head_inc(txq);
483 }
484 return 0;
441} 485}
442 486
443static netdev_tx_t be_xmit(struct sk_buff *skb, 487static netdev_tx_t be_xmit(struct sk_buff *skb,
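
The make_tx_wrbs() rework above adds the standard unwind for a failed streaming mapping: remember the ring head before writing descriptors, check every pci_map_single()/pci_map_page() with pci_dma_mapping_error(), and on failure walk back over what was already mapped. In the driver itself the first fragment is mapped with pci_map_single(), which is why the unwind tracks a one-shot map_single flag. The same remember-and-rewind shape, reduced to a self-contained sketch (all names are illustrative):

	#include <linux/pci.h>

	/* Map a batch of pages for device reads; on any failure, unmap
	 * exactly the mappings that succeeded, in reverse order.
	 */
	static int map_batch(struct pci_dev *pdev, struct page **pages,
			     unsigned int len, int n, dma_addr_t *dma)
	{
		int i;

		for (i = 0; i < n; i++) {
			dma[i] = pci_map_page(pdev, pages[i], 0, len,
					      PCI_DMA_TODEVICE);
			if (pci_dma_mapping_error(pdev, dma[i]))
				goto unwind;
		}
		return 0;

	unwind:
		while (i--)
			pci_unmap_page(pdev, dma[i], len, PCI_DMA_TODEVICE);
		return -ENOMEM;
	}
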
@@ -462,6 +506,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
462 * *BEFORE* ringing the tx doorbell, so that we serialize the 506 * *BEFORE* ringing the tx doorbell, so that we serialize the
463 * tx compls of the current transmit which'll wake up the queue 507 * tx compls of the current transmit which'll wake up the queue
464 */ 508 */
509 atomic_add(wrb_cnt, &txq->used);
465 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= 510 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
466 txq->len) { 511 txq->len) {
467 netif_stop_queue(netdev); 512 netif_stop_queue(netdev);
@@ -541,6 +586,9 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
541{ 586{
542 struct be_adapter *adapter = netdev_priv(netdev); 587 struct be_adapter *adapter = netdev_priv(netdev);
543 588
589 if (!be_physfn(adapter))
590 return;
591
544 adapter->vlan_tag[vid] = 1; 592 adapter->vlan_tag[vid] = 1;
545 adapter->vlans_added++; 593 adapter->vlans_added++;
546 if (adapter->vlans_added <= (adapter->max_vlans + 1)) 594 if (adapter->vlans_added <= (adapter->max_vlans + 1))
@@ -551,6 +599,9 @@ static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
551{ 599{
552 struct be_adapter *adapter = netdev_priv(netdev); 600 struct be_adapter *adapter = netdev_priv(netdev);
553 601
602 if (!be_physfn(adapter))
603 return;
604
554 adapter->vlan_tag[vid] = 0; 605 adapter->vlan_tag[vid] = 0;
555 vlan_group_set_device(adapter->vlan_grp, vid, NULL); 606 vlan_group_set_device(adapter->vlan_grp, vid, NULL);
556 adapter->vlans_added--; 607 adapter->vlans_added--;
@@ -588,6 +639,28 @@ done:
588 return; 639 return;
589} 640}
590 641
642static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
643{
644 struct be_adapter *adapter = netdev_priv(netdev);
645 int status;
646
647 if (!adapter->sriov_enabled)
648 return -EPERM;
649
650 if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
651 return -EINVAL;
652
653 status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf],
654 adapter->vf_pmac_id[vf]);
655
656 status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf],
657 &adapter->vf_pmac_id[vf]);
 658 if (status)
659 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
660 mac, vf);
661 return status;
662}
663
591static void be_rx_rate_update(struct be_adapter *adapter) 664static void be_rx_rate_update(struct be_adapter *adapter)
592{ 665{
593 struct be_drvr_stats *stats = drvr_stats(adapter); 666 struct be_drvr_stats *stats = drvr_stats(adapter);
@@ -647,7 +720,7 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
647 BUG_ON(!rx_page_info->page); 720 BUG_ON(!rx_page_info->page);
648 721
649 if (rx_page_info->last_page_user) { 722 if (rx_page_info->last_page_user) {
650 pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus), 723 pci_unmap_page(adapter->pdev, dma_unmap_addr(rx_page_info, bus),
651 adapter->big_page_size, PCI_DMA_FROMDEVICE); 724 adapter->big_page_size, PCI_DMA_FROMDEVICE);
652 rx_page_info->last_page_user = false; 725 rx_page_info->last_page_user = false;
653 } 726 }
@@ -791,7 +864,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
791 864
792 skb->truesize = skb->len + sizeof(struct sk_buff); 865 skb->truesize = skb->len + sizeof(struct sk_buff);
793 skb->protocol = eth_type_trans(skb, adapter->netdev); 866 skb->protocol = eth_type_trans(skb, adapter->netdev);
794 skb->dev = adapter->netdev;
795 867
796 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 868 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
797 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp); 869 vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
@@ -959,7 +1031,7 @@ static void be_post_rx_frags(struct be_adapter *adapter)
959 } 1031 }
960 page_offset = page_info->page_offset; 1032 page_offset = page_info->page_offset;
961 page_info->page = pagep; 1033 page_info->page = pagep;
962 pci_unmap_addr_set(page_info, bus, page_dmaaddr); 1034 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
963 frag_dmaaddr = page_dmaaddr + page_info->page_offset; 1035 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
964 1036
965 rxd = queue_head_node(rxq); 1037 rxd = queue_head_node(rxq);
@@ -1012,35 +1084,26 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
1012 struct be_eth_wrb *wrb; 1084 struct be_eth_wrb *wrb;
1013 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list; 1085 struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
1014 struct sk_buff *sent_skb; 1086 struct sk_buff *sent_skb;
1015 u64 busaddr; 1087 u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1016 u16 cur_index, num_wrbs = 0; 1088 bool unmap_skb_hdr = true;
1017 1089
1018 cur_index = txq->tail; 1090 sent_skb = sent_skbs[txq->tail];
1019 sent_skb = sent_skbs[cur_index];
1020 BUG_ON(!sent_skb); 1091 BUG_ON(!sent_skb);
1021 sent_skbs[cur_index] = NULL; 1092 sent_skbs[txq->tail] = NULL;
1022 wrb = queue_tail_node(txq); 1093
1023 be_dws_le_to_cpu(wrb, sizeof(*wrb)); 1094 /* skip header wrb */
1024 busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
1025 if (busaddr != 0) {
1026 pci_unmap_single(adapter->pdev, busaddr,
1027 wrb->frag_len, PCI_DMA_TODEVICE);
1028 }
1029 num_wrbs++;
1030 queue_tail_inc(txq); 1095 queue_tail_inc(txq);
1031 1096
1032 while (cur_index != last_index) { 1097 do {
1033 cur_index = txq->tail; 1098 cur_index = txq->tail;
1034 wrb = queue_tail_node(txq); 1099 wrb = queue_tail_node(txq);
1035 be_dws_le_to_cpu(wrb, sizeof(*wrb)); 1100 unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
1036 busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo; 1101 skb_headlen(sent_skb)));
1037 if (busaddr != 0) { 1102 unmap_skb_hdr = false;
1038 pci_unmap_page(adapter->pdev, busaddr, 1103
1039 wrb->frag_len, PCI_DMA_TODEVICE);
1040 }
1041 num_wrbs++; 1104 num_wrbs++;
1042 queue_tail_inc(txq); 1105 queue_tail_inc(txq);
1043 } 1106 } while (cur_index != last_index);
1044 1107
1045 atomic_sub(num_wrbs, &txq->used); 1108 atomic_sub(num_wrbs, &txq->used);
1046 1109
@@ -1255,6 +1318,8 @@ static int be_tx_queues_create(struct be_adapter *adapter)
1255 /* Ask BE to create Tx Event queue */ 1318 /* Ask BE to create Tx Event queue */
1256 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) 1319 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1257 goto tx_eq_free; 1320 goto tx_eq_free;
1321 adapter->base_eq_id = adapter->tx_eq.q.id;
1322
1258 /* Alloc TX eth compl queue */ 1323 /* Alloc TX eth compl queue */
1259 cq = &adapter->tx_obj.cq; 1324 cq = &adapter->tx_obj.cq;
1260 if (be_queue_alloc(adapter, cq, TX_CQ_LEN, 1325 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
@@ -1382,7 +1447,7 @@ rx_eq_free:
1382/* There are 8 evt ids per func. Returns the evt id's bit number */ 1447/* There are 8 evt ids per func. Returns the evt id's bit number */
1383static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id) 1448static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1384{ 1449{
1385 return eq_id % 8; 1450 return eq_id - adapter->base_eq_id;
1386} 1451}
1387 1452
1388static irqreturn_t be_intx(int irq, void *dev) 1453static irqreturn_t be_intx(int irq, void *dev)
@@ -1560,6 +1625,28 @@ static void be_msix_enable(struct be_adapter *adapter)
1560 return; 1625 return;
1561} 1626}
1562 1627
1628static void be_sriov_enable(struct be_adapter *adapter)
1629{
1630#ifdef CONFIG_PCI_IOV
1631 int status;
1632 if (be_physfn(adapter) && num_vfs) {
1633 status = pci_enable_sriov(adapter->pdev, num_vfs);
1634 adapter->sriov_enabled = status ? false : true;
1635 }
1636#endif
1637 return;
1638}
1639
1640static void be_sriov_disable(struct be_adapter *adapter)
1641{
1642#ifdef CONFIG_PCI_IOV
1643 if (adapter->sriov_enabled) {
1644 pci_disable_sriov(adapter->pdev);
1645 adapter->sriov_enabled = false;
1646 }
1647#endif
1648}
1649
1563static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) 1650static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
1564{ 1651{
1565 return adapter->msix_entries[ 1652 return adapter->msix_entries[
@@ -1617,6 +1704,9 @@ static int be_irq_register(struct be_adapter *adapter)
1617 status = be_msix_register(adapter); 1704 status = be_msix_register(adapter);
1618 if (status == 0) 1705 if (status == 0)
1619 goto done; 1706 goto done;
1707 /* INTx is not supported for VF */
1708 if (!be_physfn(adapter))
1709 return status;
1620 } 1710 }
1621 1711
1622 /* INTx */ 1712 /* INTx */
@@ -1690,14 +1780,17 @@ static int be_open(struct net_device *netdev)
1690 goto ret_sts; 1780 goto ret_sts;
1691 be_link_status_update(adapter, link_up); 1781 be_link_status_update(adapter, link_up);
1692 1782
1693 status = be_vid_config(adapter); 1783 if (be_physfn(adapter))
1784 status = be_vid_config(adapter);
1694 if (status) 1785 if (status)
1695 goto ret_sts; 1786 goto ret_sts;
1696 1787
1697 status = be_cmd_set_flow_control(adapter, 1788 if (be_physfn(adapter)) {
1698 adapter->tx_fc, adapter->rx_fc); 1789 status = be_cmd_set_flow_control(adapter,
1699 if (status) 1790 adapter->tx_fc, adapter->rx_fc);
1700 goto ret_sts; 1791 if (status)
1792 goto ret_sts;
1793 }
1701 1794
1702 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100)); 1795 schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
1703ret_sts: 1796ret_sts:
@@ -1723,7 +1816,7 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
1723 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK); 1816 PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
1724 if (status) { 1817 if (status) {
1725 dev_err(&adapter->pdev->dev, 1818 dev_err(&adapter->pdev->dev,
1726 "Could not enable Wake-on-lan \n"); 1819 "Could not enable Wake-on-lan\n");
1727 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, 1820 pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
1728 cmd.dma); 1821 cmd.dma);
1729 return status; 1822 return status;
@@ -1745,22 +1838,48 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
1745static int be_setup(struct be_adapter *adapter) 1838static int be_setup(struct be_adapter *adapter)
1746{ 1839{
1747 struct net_device *netdev = adapter->netdev; 1840 struct net_device *netdev = adapter->netdev;
1748 u32 cap_flags, en_flags; 1841 u32 cap_flags, en_flags, vf = 0;
1749 int status; 1842 int status;
1843 u8 mac[ETH_ALEN];
1750 1844
1751 cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 1845 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
1752 BE_IF_FLAGS_MCAST_PROMISCUOUS | 1846
1753 BE_IF_FLAGS_PROMISCUOUS | 1847 if (be_physfn(adapter)) {
1754 BE_IF_FLAGS_PASS_L3L4_ERRORS; 1848 cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
1755 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST | 1849 BE_IF_FLAGS_PROMISCUOUS |
1756 BE_IF_FLAGS_PASS_L3L4_ERRORS; 1850 BE_IF_FLAGS_PASS_L3L4_ERRORS;
1851 en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
1852 }
1757 1853
1758 status = be_cmd_if_create(adapter, cap_flags, en_flags, 1854 status = be_cmd_if_create(adapter, cap_flags, en_flags,
1759 netdev->dev_addr, false/* pmac_invalid */, 1855 netdev->dev_addr, false/* pmac_invalid */,
1760 &adapter->if_handle, &adapter->pmac_id); 1856 &adapter->if_handle, &adapter->pmac_id, 0);
1761 if (status != 0) 1857 if (status != 0)
1762 goto do_none; 1858 goto do_none;
1763 1859
1860 if (be_physfn(adapter)) {
1861 while (vf < num_vfs) {
1862 cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
1863 | BE_IF_FLAGS_BROADCAST;
1864 status = be_cmd_if_create(adapter, cap_flags, en_flags,
1865 mac, true, &adapter->vf_if_handle[vf],
1866 NULL, vf+1);
1867 if (status) {
1868 dev_err(&adapter->pdev->dev,
1869 "Interface Create failed for VF %d\n", vf);
1870 goto if_destroy;
1871 }
1872 vf++;
 1873 }
 1874 } else {
1875 status = be_cmd_mac_addr_query(adapter, mac,
1876 MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
1877 if (!status) {
1878 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
1879 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
1880 }
1881 }
1882
1764 status = be_tx_queues_create(adapter); 1883 status = be_tx_queues_create(adapter);
1765 if (status != 0) 1884 if (status != 0)
1766 goto if_destroy; 1885 goto if_destroy;
@@ -1782,6 +1901,9 @@ rx_qs_destroy:
1782tx_qs_destroy: 1901tx_qs_destroy:
1783 be_tx_queues_destroy(adapter); 1902 be_tx_queues_destroy(adapter);
1784if_destroy: 1903if_destroy:
1904 for (vf = 0; vf < num_vfs; vf++)
1905 if (adapter->vf_if_handle[vf])
1906 be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]);
1785 be_cmd_if_destroy(adapter, adapter->if_handle); 1907 be_cmd_if_destroy(adapter, adapter->if_handle);
1786do_none: 1908do_none:
1787 return status; 1909 return status;
@@ -2061,6 +2183,7 @@ static struct net_device_ops be_netdev_ops = {
2061 .ndo_vlan_rx_register = be_vlan_register, 2183 .ndo_vlan_rx_register = be_vlan_register,
2062 .ndo_vlan_rx_add_vid = be_vlan_add_vid, 2184 .ndo_vlan_rx_add_vid = be_vlan_add_vid,
2063 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid, 2185 .ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
2186 .ndo_set_vf_mac = be_set_vf_mac
2064}; 2187};
2065 2188
2066static void be_netdev_init(struct net_device *netdev) 2189static void be_netdev_init(struct net_device *netdev)
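
Hooking be_set_vf_mac into net_device_ops above makes the driver reachable through the rtnetlink VF attributes, i.e. `ip link set <dev> vf <n> mac <addr>` lands in the ndo_set_vf_mac callback. A skeleton of such a callback with the same validation order, using a hypothetical private struct:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	struct my_adapter {		/* hypothetical */
		bool sriov_enabled;
		int num_vfs;
	};

	static int my_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
	{
		struct my_adapter *adapter = netdev_priv(netdev);

		if (!adapter->sriov_enabled)
			return -EPERM;	/* no VFs were created */
		if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
			return -EINVAL;
		/* ...ask the PF to reprogram the VF's MAC filter... */
		return 0;
	}
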
@@ -2102,37 +2225,48 @@ static void be_unmap_pci_bars(struct be_adapter *adapter)
2102 iounmap(adapter->csr); 2225 iounmap(adapter->csr);
2103 if (adapter->db) 2226 if (adapter->db)
2104 iounmap(adapter->db); 2227 iounmap(adapter->db);
2105 if (adapter->pcicfg) 2228 if (adapter->pcicfg && be_physfn(adapter))
2106 iounmap(adapter->pcicfg); 2229 iounmap(adapter->pcicfg);
2107} 2230}
2108 2231
2109static int be_map_pci_bars(struct be_adapter *adapter) 2232static int be_map_pci_bars(struct be_adapter *adapter)
2110{ 2233{
2111 u8 __iomem *addr; 2234 u8 __iomem *addr;
2112 int pcicfg_reg; 2235 int pcicfg_reg, db_reg;
2113
2114 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2115 pci_resource_len(adapter->pdev, 2));
2116 if (addr == NULL)
2117 return -ENOMEM;
2118 adapter->csr = addr;
2119 2236
2120 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4), 2237 if (be_physfn(adapter)) {
2121 128 * 1024); 2238 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2122 if (addr == NULL) 2239 pci_resource_len(adapter->pdev, 2));
2123 goto pci_map_err; 2240 if (addr == NULL)
2124 adapter->db = addr; 2241 return -ENOMEM;
2242 adapter->csr = addr;
2243 }
2125 2244
2126 if (adapter->generation == BE_GEN2) 2245 if (adapter->generation == BE_GEN2) {
2127 pcicfg_reg = 1; 2246 pcicfg_reg = 1;
2128 else 2247 db_reg = 4;
2248 } else {
2129 pcicfg_reg = 0; 2249 pcicfg_reg = 0;
2130 2250 if (be_physfn(adapter))
2131 addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg), 2251 db_reg = 4;
2132 pci_resource_len(adapter->pdev, pcicfg_reg)); 2252 else
2253 db_reg = 0;
2254 }
2255 addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
2256 pci_resource_len(adapter->pdev, db_reg));
2133 if (addr == NULL) 2257 if (addr == NULL)
2134 goto pci_map_err; 2258 goto pci_map_err;
2135 adapter->pcicfg = addr; 2259 adapter->db = addr;
2260
2261 if (be_physfn(adapter)) {
2262 addr = ioremap_nocache(
2263 pci_resource_start(adapter->pdev, pcicfg_reg),
2264 pci_resource_len(adapter->pdev, pcicfg_reg));
2265 if (addr == NULL)
2266 goto pci_map_err;
2267 adapter->pcicfg = addr;
2268 } else
2269 adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
2136 2270
2137 return 0; 2271 return 0;
2138pci_map_err: 2272pci_map_err:
@@ -2246,6 +2380,8 @@ static void __devexit be_remove(struct pci_dev *pdev)
2246 2380
2247 be_ctrl_cleanup(adapter); 2381 be_ctrl_cleanup(adapter);
2248 2382
2383 be_sriov_disable(adapter);
2384
2249 be_msix_disable(adapter); 2385 be_msix_disable(adapter);
2250 2386
2251 pci_set_drvdata(pdev, NULL); 2387 pci_set_drvdata(pdev, NULL);
@@ -2270,16 +2406,20 @@ static int be_get_config(struct be_adapter *adapter)
2270 return status; 2406 return status;
2271 2407
2272 memset(mac, 0, ETH_ALEN); 2408 memset(mac, 0, ETH_ALEN);
2273 status = be_cmd_mac_addr_query(adapter, mac, 2409
2410 if (be_physfn(adapter)) {
2411 status = be_cmd_mac_addr_query(adapter, mac,
2274 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0); 2412 MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
2275 if (status)
2276 return status;
2277 2413
2278 if (!is_valid_ether_addr(mac)) 2414 if (status)
2279 return -EADDRNOTAVAIL; 2415 return status;
2416
2417 if (!is_valid_ether_addr(mac))
2418 return -EADDRNOTAVAIL;
2280 2419
2281 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 2420 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2282 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 2421 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2422 }
2283 2423
2284 if (adapter->cap & 0x400) 2424 if (adapter->cap & 0x400)
2285 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4; 2425 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
@@ -2296,6 +2436,7 @@ static int __devinit be_probe(struct pci_dev *pdev,
2296 struct be_adapter *adapter; 2436 struct be_adapter *adapter;
2297 struct net_device *netdev; 2437 struct net_device *netdev;
2298 2438
2439
2299 status = pci_enable_device(pdev); 2440 status = pci_enable_device(pdev);
2300 if (status) 2441 if (status)
2301 goto do_none; 2442 goto do_none;
@@ -2344,24 +2485,28 @@ static int __devinit be_probe(struct pci_dev *pdev,
2344 } 2485 }
2345 } 2486 }
2346 2487
2488 be_sriov_enable(adapter);
2489
2347 status = be_ctrl_init(adapter); 2490 status = be_ctrl_init(adapter);
2348 if (status) 2491 if (status)
2349 goto free_netdev; 2492 goto free_netdev;
2350 2493
2351 /* sync up with fw's ready state */ 2494 /* sync up with fw's ready state */
2352 status = be_cmd_POST(adapter); 2495 if (be_physfn(adapter)) {
2353 if (status) 2496 status = be_cmd_POST(adapter);
2354 goto ctrl_clean; 2497 if (status)
2498 goto ctrl_clean;
2499
2500 status = be_cmd_reset_function(adapter);
2501 if (status)
2502 goto ctrl_clean;
2503 }
2355 2504
2356 /* tell fw we're ready to fire cmds */ 2505 /* tell fw we're ready to fire cmds */
2357 status = be_cmd_fw_init(adapter); 2506 status = be_cmd_fw_init(adapter);
2358 if (status) 2507 if (status)
2359 goto ctrl_clean; 2508 goto ctrl_clean;
2360 2509
2361 status = be_cmd_reset_function(adapter);
2362 if (status)
2363 goto ctrl_clean;
2364
2365 status = be_stats_init(adapter); 2510 status = be_stats_init(adapter);
2366 if (status) 2511 if (status)
2367 goto ctrl_clean; 2512 goto ctrl_clean;
@@ -2391,6 +2536,7 @@ ctrl_clean:
2391 be_ctrl_cleanup(adapter); 2536 be_ctrl_cleanup(adapter);
2392free_netdev: 2537free_netdev:
2393 be_msix_disable(adapter); 2538 be_msix_disable(adapter);
2539 be_sriov_disable(adapter);
2394 free_netdev(adapter->netdev); 2540 free_netdev(adapter->netdev);
2395 pci_set_drvdata(pdev, NULL); 2541 pci_set_drvdata(pdev, NULL);
2396rel_reg: 2542rel_reg:
@@ -2587,6 +2733,13 @@ static int __init be_init_module(void)
2587 rx_frag_size = 2048; 2733 rx_frag_size = 2048;
2588 } 2734 }
2589 2735
2736 if (num_vfs > 32) {
2737 printk(KERN_WARNING DRV_NAME
2738 " : Module param num_vfs must not be greater than 32."
2739 "Using 32\n");
2740 num_vfs = 32;
2741 }
2742
2590 return pci_register_driver(&be_driver); 2743 return pci_register_driver(&be_driver);
2591} 2744}
2592module_init(be_init_module); 2745module_init(be_init_module);
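
be_sriov_enable()/be_sriov_disable() in the patch above are thin wrappers around the PCI core's SR-IOV entry points, compiled in only under CONFIG_PCI_IOV and effective only on the physical function. Reduced to a sketch (function names are illustrative; pci_enable_sriov() returns 0 on success):

	#include <linux/pci.h>

	static bool try_sriov_enable(struct pci_dev *pdev, unsigned int num_vfs)
	{
	#ifdef CONFIG_PCI_IOV
		/* Only a physical function may instantiate VFs. */
		if (!pdev->is_virtfn && num_vfs &&
		    pci_enable_sriov(pdev, num_vfs) == 0)
			return true;
	#endif
		return false;
	}

	static void sriov_disable(struct pci_dev *pdev, bool enabled)
	{
	#ifdef CONFIG_PCI_IOV
		if (enabled)
			pci_disable_sriov(pdev);
	#endif
	}
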
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 587f93cf03f6..c488cea8f455 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -812,14 +812,14 @@ static void bfin_mac_timeout(struct net_device *dev)
812static void bfin_mac_multicast_hash(struct net_device *dev) 812static void bfin_mac_multicast_hash(struct net_device *dev)
813{ 813{
814 u32 emac_hashhi, emac_hashlo; 814 u32 emac_hashhi, emac_hashlo;
815 struct dev_mc_list *dmi; 815 struct netdev_hw_addr *ha;
816 char *addrs; 816 char *addrs;
817 u32 crc; 817 u32 crc;
818 818
819 emac_hashhi = emac_hashlo = 0; 819 emac_hashhi = emac_hashlo = 0;
820 820
821 netdev_for_each_mc_addr(dmi, dev) { 821 netdev_for_each_mc_addr(ha, dev) {
822 addrs = dmi->dmi_addr; 822 addrs = ha->addr;
823 823
824 /* skip non-multicast addresses */ 824 /* skip non-multicast addresses */
825 if (!(*addrs & 1)) 825 if (!(*addrs & 1))
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 598b007f1991..44ceecf9d103 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -972,7 +972,7 @@ bmac_remove_multi(struct net_device *dev,
972 */ 972 */
973static void bmac_set_multicast(struct net_device *dev) 973static void bmac_set_multicast(struct net_device *dev)
974{ 974{
975 struct dev_mc_list *dmi; 975 struct netdev_hw_addr *ha;
976 struct bmac_data *bp = netdev_priv(dev); 976 struct bmac_data *bp = netdev_priv(dev);
977 int num_addrs = netdev_mc_count(dev); 977 int num_addrs = netdev_mc_count(dev);
978 unsigned short rx_cfg; 978 unsigned short rx_cfg;
@@ -1001,8 +1001,8 @@ static void bmac_set_multicast(struct net_device *dev)
1001 rx_cfg = bmac_rx_on(dev, 0, 0); 1001 rx_cfg = bmac_rx_on(dev, 0, 0);
1002 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg)); 1002 XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
1003 } else { 1003 } else {
1004 netdev_for_each_mc_addr(dmi, dev) 1004 netdev_for_each_mc_addr(ha, dev)
1005 bmac_addhash(bp, dmi->dmi_addr); 1005 bmac_addhash(bp, ha->addr);
1006 bmac_update_hash_table_mask(dev, bp); 1006 bmac_update_hash_table_mask(dev, bp);
1007 rx_cfg = bmac_rx_on(dev, 1, 0); 1007 rx_cfg = bmac_rx_on(dev, 1, 0);
1008 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg)); 1008 XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg));
@@ -1016,7 +1016,7 @@ static void bmac_set_multicast(struct net_device *dev)
1016 1016
1017static void bmac_set_multicast(struct net_device *dev) 1017static void bmac_set_multicast(struct net_device *dev)
1018{ 1018{
1019 struct dev_mc_list *dmi; 1019 struct netdev_hw_addr *ha;
1020 char *addrs; 1020 char *addrs;
1021 int i; 1021 int i;
1022 unsigned short rx_cfg; 1022 unsigned short rx_cfg;
@@ -1040,8 +1040,8 @@ static void bmac_set_multicast(struct net_device *dev)
1040 1040
1041 for(i = 0; i < 4; i++) hash_table[i] = 0; 1041 for(i = 0; i < 4; i++) hash_table[i] = 0;
1042 1042
1043 netdev_for_each_mc_addr(dmi, dev) { 1043 netdev_for_each_mc_addr(ha, dev) {
1044 addrs = dmi->dmi_addr; 1044 addrs = ha->addr;
1045 1045
1046 if(!(*addrs & 1)) 1046 if(!(*addrs & 1))
1047 continue; 1047 continue;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index a257babd1bb4..53326fed6c81 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2670,7 +2670,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2670 } 2670 }
2671 2671
2672 rx_pg->page = page; 2672 rx_pg->page = page;
2673 pci_unmap_addr_set(rx_pg, mapping, mapping); 2673 dma_unmap_addr_set(rx_pg, mapping, mapping);
2674 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; 2674 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2675 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; 2675 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2676 return 0; 2676 return 0;
@@ -2685,7 +2685,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2685 if (!page) 2685 if (!page)
2686 return; 2686 return;
2687 2687
2688 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE, 2688 pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2689 PCI_DMA_FROMDEVICE); 2689 PCI_DMA_FROMDEVICE);
2690 2690
2691 __free_page(page); 2691 __free_page(page);
@@ -2717,7 +2717,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2717 } 2717 }
2718 2718
2719 rx_buf->skb = skb; 2719 rx_buf->skb = skb;
2720 pci_unmap_addr_set(rx_buf, mapping, mapping); 2720 dma_unmap_addr_set(rx_buf, mapping, mapping);
2721 2721
2722 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32; 2722 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2723 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff; 2723 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
@@ -2816,7 +2816,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2816 } 2816 }
2817 } 2817 }
2818 2818
2819 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 2819 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
2820 skb_headlen(skb), PCI_DMA_TODEVICE); 2820 skb_headlen(skb), PCI_DMA_TODEVICE);
2821 2821
2822 tx_buf->skb = NULL; 2822 tx_buf->skb = NULL;
@@ -2826,7 +2826,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2826 sw_cons = NEXT_TX_BD(sw_cons); 2826 sw_cons = NEXT_TX_BD(sw_cons);
2827 2827
2828 pci_unmap_page(bp->pdev, 2828 pci_unmap_page(bp->pdev,
2829 pci_unmap_addr( 2829 dma_unmap_addr(
2830 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)], 2830 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2831 mapping), 2831 mapping),
2832 skb_shinfo(skb)->frags[i].size, 2832 skb_shinfo(skb)->frags[i].size,
@@ -2908,8 +2908,8 @@ bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2908 if (prod != cons) { 2908 if (prod != cons) {
2909 prod_rx_pg->page = cons_rx_pg->page; 2909 prod_rx_pg->page = cons_rx_pg->page;
2910 cons_rx_pg->page = NULL; 2910 cons_rx_pg->page = NULL;
2911 pci_unmap_addr_set(prod_rx_pg, mapping, 2911 dma_unmap_addr_set(prod_rx_pg, mapping,
2912 pci_unmap_addr(cons_rx_pg, mapping)); 2912 dma_unmap_addr(cons_rx_pg, mapping));
2913 2913
2914 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi; 2914 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2915 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo; 2915 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
@@ -2933,7 +2933,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2933 prod_rx_buf = &rxr->rx_buf_ring[prod]; 2933 prod_rx_buf = &rxr->rx_buf_ring[prod];
2934 2934
2935 pci_dma_sync_single_for_device(bp->pdev, 2935 pci_dma_sync_single_for_device(bp->pdev,
2936 pci_unmap_addr(cons_rx_buf, mapping), 2936 dma_unmap_addr(cons_rx_buf, mapping),
2937 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); 2937 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2938 2938
2939 rxr->rx_prod_bseq += bp->rx_buf_use_size; 2939 rxr->rx_prod_bseq += bp->rx_buf_use_size;
@@ -2943,8 +2943,8 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2943 if (cons == prod) 2943 if (cons == prod)
2944 return; 2944 return;
2945 2945
2946 pci_unmap_addr_set(prod_rx_buf, mapping, 2946 dma_unmap_addr_set(prod_rx_buf, mapping,
2947 pci_unmap_addr(cons_rx_buf, mapping)); 2947 dma_unmap_addr(cons_rx_buf, mapping));
2948 2948
2949 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)]; 2949 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2950 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)]; 2950 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
@@ -3017,7 +3017,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
3017 /* Don't unmap yet. If we're unable to allocate a new 3017 /* Don't unmap yet. If we're unable to allocate a new
3018 * page, we need to recycle the page and the DMA addr. 3018 * page, we need to recycle the page and the DMA addr.
3019 */ 3019 */
3020 mapping_old = pci_unmap_addr(rx_pg, mapping); 3020 mapping_old = dma_unmap_addr(rx_pg, mapping);
3021 if (i == pages - 1) 3021 if (i == pages - 1)
3022 frag_len -= 4; 3022 frag_len -= 4;
3023 3023
@@ -3098,7 +3098,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3098 3098
3099 rx_buf->skb = NULL; 3099 rx_buf->skb = NULL;
3100 3100
3101 dma_addr = pci_unmap_addr(rx_buf, mapping); 3101 dma_addr = dma_unmap_addr(rx_buf, mapping);
3102 3102
3103 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr, 3103 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3104 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, 3104 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
@@ -3546,7 +3546,6 @@ bnx2_set_rx_mode(struct net_device *dev)
3546 } 3546 }
3547 else { 3547 else {
3548 /* Accept one or more multicast(s). */ 3548 /* Accept one or more multicast(s). */
3549 struct dev_mc_list *mclist;
3550 u32 mc_filter[NUM_MC_HASH_REGISTERS]; 3549 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3551 u32 regidx; 3550 u32 regidx;
3552 u32 bit; 3551 u32 bit;
@@ -3554,8 +3553,8 @@ bnx2_set_rx_mode(struct net_device *dev)
3554 3553
3555 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS); 3554 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3556 3555
3557 netdev_for_each_mc_addr(mclist, dev) { 3556 netdev_for_each_mc_addr(ha, dev) {
3558 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr); 3557 crc = ether_crc_le(ETH_ALEN, ha->addr);
3559 bit = crc & 0xff; 3558 bit = crc & 0xff;
3560 regidx = (bit & 0xe0) >> 5; 3559 regidx = (bit & 0xe0) >> 5;
3561 bit &= 0x1f; 3560 bit &= 0x1f;
@@ -5312,7 +5311,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5312 } 5311 }
5313 5312
5314 pci_unmap_single(bp->pdev, 5313 pci_unmap_single(bp->pdev,
5315 pci_unmap_addr(tx_buf, mapping), 5314 dma_unmap_addr(tx_buf, mapping),
5316 skb_headlen(skb), 5315 skb_headlen(skb),
5317 PCI_DMA_TODEVICE); 5316 PCI_DMA_TODEVICE);
5318 5317
@@ -5323,7 +5322,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
5323 for (k = 0; k < last; k++, j++) { 5322 for (k = 0; k < last; k++, j++) {
5324 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)]; 5323 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5325 pci_unmap_page(bp->pdev, 5324 pci_unmap_page(bp->pdev,
5326 pci_unmap_addr(tx_buf, mapping), 5325 dma_unmap_addr(tx_buf, mapping),
5327 skb_shinfo(skb)->frags[k].size, 5326 skb_shinfo(skb)->frags[k].size,
5328 PCI_DMA_TODEVICE); 5327 PCI_DMA_TODEVICE);
5329 } 5328 }
@@ -5353,7 +5352,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
5353 continue; 5352 continue;
5354 5353
5355 pci_unmap_single(bp->pdev, 5354 pci_unmap_single(bp->pdev,
5356 pci_unmap_addr(rx_buf, mapping), 5355 dma_unmap_addr(rx_buf, mapping),
5357 bp->rx_buf_use_size, 5356 bp->rx_buf_use_size,
5358 PCI_DMA_FROMDEVICE); 5357 PCI_DMA_FROMDEVICE);
5359 5358
@@ -5763,7 +5762,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5763 skb_reserve(rx_skb, BNX2_RX_OFFSET); 5762 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5764 5763
5765 pci_dma_sync_single_for_cpu(bp->pdev, 5764 pci_dma_sync_single_for_cpu(bp->pdev,
5766 pci_unmap_addr(rx_buf, mapping), 5765 dma_unmap_addr(rx_buf, mapping),
5767 bp->rx_buf_size, PCI_DMA_FROMDEVICE); 5766 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5768 5767
5769 if (rx_hdr->l2_fhdr_status & 5768 if (rx_hdr->l2_fhdr_status &
@@ -6423,7 +6422,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6423 6422
6424 tx_buf = &txr->tx_buf_ring[ring_prod]; 6423 tx_buf = &txr->tx_buf_ring[ring_prod];
6425 tx_buf->skb = skb; 6424 tx_buf->skb = skb;
6426 pci_unmap_addr_set(tx_buf, mapping, mapping); 6425 dma_unmap_addr_set(tx_buf, mapping, mapping);
6427 6426
6428 txbd = &txr->tx_desc_ring[ring_prod]; 6427 txbd = &txr->tx_desc_ring[ring_prod];
6429 6428
@@ -6448,7 +6447,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6448 len, PCI_DMA_TODEVICE); 6447 len, PCI_DMA_TODEVICE);
6449 if (pci_dma_mapping_error(bp->pdev, mapping)) 6448 if (pci_dma_mapping_error(bp->pdev, mapping))
6450 goto dma_error; 6449 goto dma_error;
6451 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping, 6450 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6452 mapping); 6451 mapping);
6453 6452
6454 txbd->tx_bd_haddr_hi = (u64) mapping >> 32; 6453 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
@@ -6485,7 +6484,7 @@ dma_error:
6485 ring_prod = TX_RING_IDX(prod); 6484 ring_prod = TX_RING_IDX(prod);
6486 tx_buf = &txr->tx_buf_ring[ring_prod]; 6485 tx_buf = &txr->tx_buf_ring[ring_prod];
6487 tx_buf->skb = NULL; 6486 tx_buf->skb = NULL;
6488 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping), 6487 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6489 skb_headlen(skb), PCI_DMA_TODEVICE); 6488 skb_headlen(skb), PCI_DMA_TODEVICE);
6490 6489
6491 /* unmap remaining mapped pages */ 6490 /* unmap remaining mapped pages */
@@ -6493,7 +6492,7 @@ dma_error:
6493 prod = NEXT_TX_BD(prod); 6492 prod = NEXT_TX_BD(prod);
6494 ring_prod = TX_RING_IDX(prod); 6493 ring_prod = TX_RING_IDX(prod);
6495 tx_buf = &txr->tx_buf_ring[ring_prod]; 6494 tx_buf = &txr->tx_buf_ring[ring_prod];
6496 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping), 6495 pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6497 skb_shinfo(skb)->frags[i].size, 6496 skb_shinfo(skb)->frags[i].size,
6498 PCI_DMA_TODEVICE); 6497 PCI_DMA_TODEVICE);
6499 } 6498 }
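
The bnx2_set_rx_mode() hunk above keeps the existing multicast hashing and only changes how the list is walked: each address is CRC'd with ether_crc_le(), the low eight bits select one of 256 filter bits, of which the top three pick the 32-bit register and the bottom five the bit inside it. The arithmetic in isolation, as a sketch:

	#include <linux/crc32.h>
	#include <linux/etherdevice.h>

	#define NUM_MC_HASH_REGISTERS	8	/* 8 x 32 = 256 filter bits */

	static void mc_hash_set(u32 *mc_filter, const u8 *addr)
	{
		u32 crc = ether_crc_le(ETH_ALEN, addr);
		u32 bit = crc & 0xff;		/* filter bit 0..255 */
		u32 regidx = (bit & 0xe0) >> 5;	/* bits 7..5: register */

		bit &= 0x1f;			/* bits 4..0: bit in reg */
		mc_filter[regidx] |= (1 << bit);
	}
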
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index cd4b0e4637ab..ab34a5d86f86 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6551,17 +6551,17 @@ struct l2_fhdr {
6551 6551
6552struct sw_bd { 6552struct sw_bd {
6553 struct sk_buff *skb; 6553 struct sk_buff *skb;
6554 DECLARE_PCI_UNMAP_ADDR(mapping) 6554 DEFINE_DMA_UNMAP_ADDR(mapping);
6555}; 6555};
6556 6556
6557struct sw_pg { 6557struct sw_pg {
6558 struct page *page; 6558 struct page *page;
6559 DECLARE_PCI_UNMAP_ADDR(mapping) 6559 DEFINE_DMA_UNMAP_ADDR(mapping);
6560}; 6560};
6561 6561
6562struct sw_tx_bd { 6562struct sw_tx_bd {
6563 struct sk_buff *skb; 6563 struct sk_buff *skb;
6564 DECLARE_PCI_UNMAP_ADDR(mapping) 6564 DEFINE_DMA_UNMAP_ADDR(mapping);
6565 unsigned short is_gso; 6565 unsigned short is_gso;
6566 unsigned short nr_frags; 6566 unsigned short nr_frags;
6567}; 6567};
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 3c48a7a68308..081953005b84 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -24,16 +24,25 @@
24#define BCM_VLAN 1 24#define BCM_VLAN 1
25#endif 25#endif
26 26
27#define BNX2X_MULTI_QUEUE
28
29#define BNX2X_NEW_NAPI
30
31
32
27#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 33#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
28#define BCM_CNIC 1 34#define BCM_CNIC 1
29#include "cnic_if.h" 35#include "cnic_if.h"
30#endif 36#endif
31 37
32#define BNX2X_MULTI_QUEUE
33
34#define BNX2X_NEW_NAPI
35
36 38
39#ifdef BCM_CNIC
40#define BNX2X_MIN_MSIX_VEC_CNT 3
41#define BNX2X_MSIX_VEC_FP_START 2
42#else
43#define BNX2X_MIN_MSIX_VEC_CNT 2
44#define BNX2X_MSIX_VEC_FP_START 1
45#endif
37 46
38#include <linux/mdio.h> 47#include <linux/mdio.h>
39#include "bnx2x_reg.h" 48#include "bnx2x_reg.h"
@@ -83,7 +92,12 @@ do { \
83 __func__, __LINE__, \ 92 __func__, __LINE__, \
84 bp->dev ? (bp->dev->name) : "?", \ 93 bp->dev ? (bp->dev->name) : "?", \
85 ##__args); \ 94 ##__args); \
86} while (0) 95 } while (0)
96
97#define BNX2X_ERROR(__fmt, __args...) do { \
98 pr_err("[%s:%d]" __fmt, __func__, __LINE__, ##__args); \
99 } while (0)
100
87 101
88/* before we have a dev->name use dev_info() */ 102/* before we have a dev->name use dev_info() */
89#define BNX2X_DEV_INFO(__fmt, __args...) \ 103#define BNX2X_DEV_INFO(__fmt, __args...) \
@@ -155,15 +169,21 @@ do { \
155#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field)) 169#define SHMEM2_RD(bp, field) REG_RD(bp, SHMEM2_ADDR(bp, field))
156#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val) 170#define SHMEM2_WR(bp, field, val) REG_WR(bp, SHMEM2_ADDR(bp, field), val)
157 171
172#define MF_CFG_RD(bp, field) SHMEM_RD(bp, mf_cfg.field)
173#define MF_CFG_WR(bp, field, val) SHMEM_WR(bp, mf_cfg.field, val)
174
158#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg) 175#define EMAC_RD(bp, reg) REG_RD(bp, emac_base + reg)
159#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val) 176#define EMAC_WR(bp, reg, val) REG_WR(bp, emac_base + reg, val)
160 177
178#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
179 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
180
161 181
162/* fast path */ 182/* fast path */
163 183
164struct sw_rx_bd { 184struct sw_rx_bd {
165 struct sk_buff *skb; 185 struct sk_buff *skb;
166 DECLARE_PCI_UNMAP_ADDR(mapping) 186 DEFINE_DMA_UNMAP_ADDR(mapping);
167}; 187};
168 188
169struct sw_tx_bd { 189struct sw_tx_bd {
@@ -176,7 +196,7 @@ struct sw_tx_bd {
176 196
177struct sw_rx_page { 197struct sw_rx_page {
178 struct page *page; 198 struct page *page;
179 DECLARE_PCI_UNMAP_ADDR(mapping) 199 DEFINE_DMA_UNMAP_ADDR(mapping);
180}; 200};
181 201
182union db_prod { 202union db_prod {
@@ -261,7 +281,7 @@ struct bnx2x_eth_q_stats {
261 u32 hw_csum_err; 281 u32 hw_csum_err;
262}; 282};
263 283
264#define BNX2X_NUM_Q_STATS 11 284#define BNX2X_NUM_Q_STATS 13
265#define Q_STATS_OFFSET32(stat_name) \ 285#define Q_STATS_OFFSET32(stat_name) \
266 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) 286 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
267 287
@@ -767,7 +787,7 @@ struct bnx2x_eth_stats {
767 u32 nig_timer_max; 787 u32 nig_timer_max;
768}; 788};
769 789
770#define BNX2X_NUM_STATS 41 790#define BNX2X_NUM_STATS 43
771#define STATS_OFFSET32(stat_name) \ 791#define STATS_OFFSET32(stat_name) \
772 (offsetof(struct bnx2x_eth_stats, stat_name) / 4) 792 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
773 793
@@ -818,6 +838,12 @@ struct attn_route {
818 u32 sig[4]; 838 u32 sig[4];
819}; 839};
820 840
841typedef enum {
842 BNX2X_RECOVERY_DONE,
843 BNX2X_RECOVERY_INIT,
844 BNX2X_RECOVERY_WAIT,
845} bnx2x_recovery_state_t;
846
821struct bnx2x { 847struct bnx2x {
822 /* Fields used in the tx and intr/napi performance paths 848 /* Fields used in the tx and intr/napi performance paths
823 * are grouped together in the beginning of the structure 849 * are grouped together in the beginning of the structure
@@ -835,6 +861,9 @@ struct bnx2x {
835 struct pci_dev *pdev; 861 struct pci_dev *pdev;
836 862
837 atomic_t intr_sem; 863 atomic_t intr_sem;
864
865 bnx2x_recovery_state_t recovery_state;
866 int is_leader;
838#ifdef BCM_CNIC 867#ifdef BCM_CNIC
839 struct msix_entry msix_table[MAX_CONTEXT+2]; 868 struct msix_entry msix_table[MAX_CONTEXT+2];
840#else 869#else
@@ -842,7 +871,6 @@ struct bnx2x {
842#endif 871#endif
843#define INT_MODE_INTx 1 872#define INT_MODE_INTx 1
844#define INT_MODE_MSI 2 873#define INT_MODE_MSI 2
845#define INT_MODE_MSIX 3
846 874
847 int tx_ring_size; 875 int tx_ring_size;
848 876
@@ -924,8 +952,7 @@ struct bnx2x {
924 int mrrs; 952 int mrrs;
925 953
926 struct delayed_work sp_task; 954 struct delayed_work sp_task;
927 struct work_struct reset_task; 955 struct delayed_work reset_task;
928
929 struct timer_list timer; 956 struct timer_list timer;
930 int current_interval; 957 int current_interval;
931 958
@@ -961,6 +988,8 @@ struct bnx2x {
961 u16 rx_quick_cons_trip; 988 u16 rx_quick_cons_trip;
962 u16 rx_ticks_int; 989 u16 rx_ticks_int;
963 u16 rx_ticks; 990 u16 rx_ticks;
991/* Maximal coalescing timeout in us */
992#define BNX2X_MAX_COALESCE_TOUT (0xf0*12)
964 993
965 u32 lin_cnt; 994 u32 lin_cnt;
966 995
@@ -1075,6 +1104,7 @@ struct bnx2x {
1075#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data) 1104#define INIT_CSEM_INT_TABLE_DATA(bp) (bp->csem_int_table_data)
1076#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data) 1105#define INIT_CSEM_PRAM_DATA(bp) (bp->csem_pram_data)
1077 1106
1107 char fw_ver[32];
1078 const struct firmware *firmware; 1108 const struct firmware *firmware;
1079}; 1109};
1080 1110
@@ -1125,6 +1155,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1125#define LOAD_DIAG 2 1155#define LOAD_DIAG 2
1126#define UNLOAD_NORMAL 0 1156#define UNLOAD_NORMAL 0
1127#define UNLOAD_CLOSE 1 1157#define UNLOAD_CLOSE 1
1158#define UNLOAD_RECOVERY 2
1128 1159
1129 1160
1130/* DMAE command defines */ 1161/* DMAE command defines */
@@ -1152,7 +1183,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1152#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT 1183#define DMAE_CMD_E1HVN_SHIFT DMAE_COMMAND_E1HVN_SHIFT
1153 1184
1154#define DMAE_LEN32_RD_MAX 0x80 1185#define DMAE_LEN32_RD_MAX 0x80
1155#define DMAE_LEN32_WR_MAX 0x400 1186#define DMAE_LEN32_WR_MAX(bp) (CHIP_IS_E1(bp) ? 0x400 : 0x2000)
1156 1187
1157#define DMAE_COMP_VAL 0xe0d0d0ae 1188#define DMAE_COMP_VAL 0xe0d0d0ae
1158 1189
@@ -1294,6 +1325,10 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1294 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ 1325 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \
1295 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) 1326 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR)
1296 1327
1328#define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
1329 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
1330 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
1331 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
1297 1332
1298#define MULTI_FLAGS(bp) \ 1333#define MULTI_FLAGS(bp) \
1299 (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \ 1334 (TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY | \
@@ -1333,6 +1368,9 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1333#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0 1368#define PXP2_REG_PXP2_INT_STS PXP2_REG_PXP2_INT_STS_0
1334#endif 1369#endif
1335 1370
1371#define BNX2X_VPD_LEN 128
1372#define VENDOR_ID_LEN 4
1373
1336/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */ 1374/* MISC_REG_RESET_REG - this is here for the hsi to work don't touch */
1337 1375
1338#endif /* bnx2x.h */ 1376#endif /* bnx2x.h */
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index 32e79c359e89..ff70be898765 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1594,7 +1594,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
1594 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8; 1594 MDIO_AN_REG_ADV_PAUSE_MASK) >> 8;
1595 pause_result |= (lp_pause & 1595 pause_result |= (lp_pause &
1596 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; 1596 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10;
1597 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x \n", 1597 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
1598 pause_result); 1598 pause_result);
1599 bnx2x_pause_resolve(vars, pause_result); 1599 bnx2x_pause_resolve(vars, pause_result);
1600 if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE && 1600 if (vars->flow_ctrl == BNX2X_FLOW_CTRL_NONE &&
@@ -1616,7 +1616,7 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
1616 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; 1616 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7;
1617 1617
1618 bnx2x_pause_resolve(vars, pause_result); 1618 bnx2x_pause_resolve(vars, pause_result);
1619 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x \n", 1619 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n",
1620 pause_result); 1620 pause_result);
1621 } 1621 }
1622 } 1622 }
@@ -1974,7 +1974,7 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1974 } 1974 }
1975 } 1975 }
1976 1976
1977 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x \n", 1977 DP(NETIF_MSG_LINK, "gp_status 0x%x phy_link_up %x line_speed %x\n",
1978 gp_status, vars->phy_link_up, vars->line_speed); 1978 gp_status, vars->phy_link_up, vars->line_speed);
1979 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x" 1979 DP(NETIF_MSG_LINK, "duplex %x flow_ctrl 0x%x"
1980 " autoneg 0x%x\n", 1980 " autoneg 0x%x\n",
@@ -3852,7 +3852,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3852 SPEED_AUTO_NEG) && 3852 SPEED_AUTO_NEG) &&
3853 ((params->speed_cap_mask & 3853 ((params->speed_cap_mask &
3854 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) { 3854 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
3855 DP(NETIF_MSG_LINK, "Setting 1G clause37 \n"); 3855 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
3856 bnx2x_cl45_write(bp, params->port, ext_phy_type, 3856 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3857 ext_phy_addr, MDIO_AN_DEVAD, 3857 ext_phy_addr, MDIO_AN_DEVAD,
3858 MDIO_AN_REG_ADV, 0x20); 3858 MDIO_AN_REG_ADV, 0x20);
@@ -4234,14 +4234,14 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4234 ext_phy_addr, 4234 ext_phy_addr,
4235 MDIO_PMA_DEVAD, 4235 MDIO_PMA_DEVAD,
4236 MDIO_PMA_REG_10G_CTRL2, &tmp1); 4236 MDIO_PMA_REG_10G_CTRL2, &tmp1);
4237 DP(NETIF_MSG_LINK, "1.7 = 0x%x \n", tmp1); 4237 DP(NETIF_MSG_LINK, "1.7 = 0x%x\n", tmp1);
4238 4238
4239 } else if ((params->req_line_speed == 4239 } else if ((params->req_line_speed ==
4240 SPEED_AUTO_NEG) && 4240 SPEED_AUTO_NEG) &&
4241 ((params->speed_cap_mask & 4241 ((params->speed_cap_mask &
4242 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) { 4242 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))) {
4243 4243
4244 DP(NETIF_MSG_LINK, "Setting 1G clause37 \n"); 4244 DP(NETIF_MSG_LINK, "Setting 1G clause37\n");
4245 bnx2x_cl45_write(bp, params->port, ext_phy_type, 4245 bnx2x_cl45_write(bp, params->port, ext_phy_type,
4246 ext_phy_addr, MDIO_AN_DEVAD, 4246 ext_phy_addr, MDIO_AN_DEVAD,
4247 MDIO_PMA_REG_8727_MISC_CTRL, 0); 4247 MDIO_PMA_REG_8727_MISC_CTRL, 0);
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 6c042a72d6cc..0c6dba24e37e 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -57,8 +57,8 @@
57#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
58#include "bnx2x_dump.h" 58#include "bnx2x_dump.h"
59 59
60#define DRV_MODULE_VERSION "1.52.1-7" 60#define DRV_MODULE_VERSION "1.52.53-1"
61#define DRV_MODULE_RELDATE "2010/02/28" 61#define DRV_MODULE_RELDATE "2010/04/18"
62#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
63 63
64#include <linux/firmware.h> 64#include <linux/firmware.h>
@@ -102,7 +102,8 @@ MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
102 102
103static int int_mode; 103static int int_mode;
104module_param(int_mode, int, 0); 104module_param(int_mode, int, 0);
105MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)"); 105MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
106 "(1 INT#x; 2 MSI)");
106 107
107static int dropless_fc; 108static int dropless_fc;
108module_param(dropless_fc, int, 0); 109module_param(dropless_fc, int, 0);
@@ -352,13 +353,14 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
352void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr, 353void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
353 u32 addr, u32 len) 354 u32 addr, u32 len)
354{ 355{
356 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
355 int offset = 0; 357 int offset = 0;
356 358
357 while (len > DMAE_LEN32_WR_MAX) { 359 while (len > dmae_wr_max) {
358 bnx2x_write_dmae(bp, phys_addr + offset, 360 bnx2x_write_dmae(bp, phys_addr + offset,
359 addr + offset, DMAE_LEN32_WR_MAX); 361 addr + offset, dmae_wr_max);
360 offset += DMAE_LEN32_WR_MAX * 4; 362 offset += dmae_wr_max * 4;
361 len -= DMAE_LEN32_WR_MAX; 363 len -= dmae_wr_max;
362 } 364 }
363 365
364 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len); 366 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
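
bnx2x_write_dmae_phys_len() above now takes its per-chunk ceiling from DMAE_LEN32_WR_MAX(bp), 0x400 dwords on E1 versus 0x2000 on later chips, instead of a fixed constant. The chunking itself is the usual split-plus-remainder loop; a sketch with a hypothetical callback in place of bnx2x_write_dmae() (lengths are in 32-bit words, offsets in bytes, hence the * 4):

	#include <linux/types.h>

	static void write_chunked(u32 phys, u32 addr, u32 len32, u32 max,
				  void (*write_chunk)(u32, u32, u32))
	{
		u32 offset = 0;

		while (len32 > max) {
			write_chunk(phys + offset, addr + offset, max);
			offset += max * 4;	/* byte offset advances */
			len32 -= max;		/* words remaining */
		}
		write_chunk(phys + offset, addr + offset, len32);
	}
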
@@ -508,26 +510,31 @@ static int bnx2x_mc_assert(struct bnx2x *bp)
 
 static void bnx2x_fw_dump(struct bnx2x *bp)
 {
+	u32 addr;
 	u32 mark, offset;
 	__be32 data[9];
 	int word;
 
-	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
-	mark = ((mark + 0x3) & ~0x3);
+	if (BP_NOMCP(bp)) {
+		BNX2X_ERR("NO MCP - can not dump\n");
+		return;
+	}
+
+	addr = bp->common.shmem_base - 0x0800 + 4;
+	mark = REG_RD(bp, addr);
+	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
 	pr_err("begin fw dump (mark 0x%x)\n", mark);
 
 	pr_err("");
-	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
+	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
 		for (word = 0; word < 8; word++)
-			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
-						  offset + 4*word));
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
 		data[8] = 0x0;
 		pr_cont("%s", (char *)data);
 	}
-	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
+	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
 		for (word = 0; word < 8; word++)
-			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
-						  offset + 4*word));
+			data[word] = htonl(REG_RD(bp, offset + 4*word));
 		data[8] = 0x0;
 		pr_cont("%s", (char *)data);
 	}
@@ -546,9 +553,9 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 
 	/* Indices */
 	/* Common */
-	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
-		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
-		  " spq_prod_idx(%u)\n",
+	BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
+		  " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
+		  " spq_prod_idx(0x%x)\n",
 		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
 		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
 
@@ -556,14 +563,14 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
-			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
-			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
+		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
+			  " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
+			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
 			  i, fp->rx_bd_prod, fp->rx_bd_cons,
 			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
 			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
-		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
-			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
+		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
+			  " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
 			  fp->rx_sge_prod, fp->last_max_sge,
 			  le16_to_cpu(fp->fp_u_idx),
 			  fp->status_blk->u_status_block.status_block_index);
@@ -573,12 +580,13 @@ static void bnx2x_panic_dump(struct bnx2x *bp)
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
-		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
-			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
+		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
+			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
+			  " *tx_cons_sb(0x%x)\n",
 			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
 			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
-		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
-			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
+		BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
+			  " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
 			  fp->status_blk->c_status_block.status_block_index,
 			  fp->tx_db.data.prod);
 	}
@@ -764,6 +772,40 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
  * General service functions
  */
 
+/* Return true if succeeded to acquire the lock */
+static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
+{
+	u32 lock_status;
+	u32 resource_bit = (1 << resource);
+	int func = BP_FUNC(bp);
+	u32 hw_lock_control_reg;
+
+	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
+
+	/* Validating that the resource is within range */
+	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
+		DP(NETIF_MSG_HW,
+		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
+		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
+		return -EINVAL;
+	}
+
+	if (func <= 5)
+		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
+	else
+		hw_lock_control_reg =
+				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
+
+	/* Try to acquire the lock */
+	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
+	lock_status = REG_RD(bp, hw_lock_control_reg);
+	if (lock_status & resource_bit)
+		return true;
+
+	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
+	return false;
+}
+
 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
 				u8 storm, u16 index, u8 op, u8 update)
 {
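Review note on bnx2x_trylock_hw_lock() above: the function is declared bool, yet the range check returns -EINVAL. Any non-zero value converts to true when squeezed through a bool, so a caller would read an out-of-range resource as a successfully taken lock. A minimal standalone demonstration of that C conversion rule (toy function, not the driver's):

#include <stdbool.h>
#include <stdio.h>

static bool trylock(int resource)
{
	if (resource > 7)
		return -22;	/* -EINVAL through a bool collapses to true */
	return false;		/* pretend the lock was busy */
}

int main(void)
{
	printf("out-of-range trylock reads as: %s\n",
	       trylock(99) ? "acquired" : "not acquired"); /* "acquired" */
	return 0;
}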
@@ -842,7 +884,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* unmap first bd */
 	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
 	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
-	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
-			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
+	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
+			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
 
 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
@@ -872,8 +914,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
 		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
 		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
-		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
-			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
+		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
+			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
 		if (--nbd)
 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 	}
@@ -1023,7 +1065,8 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
 
 	default:
 		BNX2X_ERR("unexpected MC reply (%d) "
-			  "fp->state is %x\n", command, fp->state);
+			  "fp[%d] state is %x\n",
+			  command, fp->index, fp->state);
 		break;
 	}
 	mb(); /* force bnx2x_wait_ramrod() to see the change */
@@ -1086,7 +1129,7 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
 	if (!page)
 		return;
 
-	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
-		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 	__free_pages(page, PAGES_PER_SGE_SHIFT);
 
@@ -1115,15 +1158,15 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 	if (unlikely(page == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
-			       PCI_DMA_FROMDEVICE);
+	mapping = dma_map_page(&bp->pdev->dev, page, 0,
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		__free_pages(page, PAGES_PER_SGE_SHIFT);
 		return -ENOMEM;
 	}
 
 	sw_buf->page = page;
-	pci_unmap_addr_set(sw_buf, mapping, mapping);
+	dma_unmap_addr_set(sw_buf, mapping, mapping);
 
 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -1143,15 +1186,15 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 	if (unlikely(skb == NULL))
 		return -ENOMEM;
 
-	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
-				 PCI_DMA_FROMDEVICE);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
+				 DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
 		dev_kfree_skb(skb);
 		return -ENOMEM;
 	}
 
 	rx_buf->skb = skb;
-	pci_unmap_addr_set(rx_buf, mapping, mapping);
+	dma_unmap_addr_set(rx_buf, mapping, mapping);
 
 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
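Review note: most hunks in this file are the same mechanical substitution — the legacy pci_* DMA wrappers replaced by the generic DMA API, keyed off the embedded struct device rather than the pci_dev. The shape of the change, pulled out for reference (buf, len and rx_buf are illustrative names; this fragment only compiles inside the kernel tree):

/* old: legacy PCI wrappers */
mapping = pci_map_single(bp->pdev, buf, len, PCI_DMA_FROMDEVICE);
pci_unmap_addr_set(rx_buf, mapping, mapping);
pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
		 len, PCI_DMA_FROMDEVICE);

/* new: generic DMA API, same call shapes keyed off &pdev->dev */
mapping = dma_map_single(&bp->pdev->dev, buf, len, DMA_FROM_DEVICE);
dma_unmap_addr_set(rx_buf, mapping, mapping);
dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
		 len, DMA_FROM_DEVICE);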
@@ -1173,13 +1216,13 @@ static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
 	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
 
-	pci_dma_sync_single_for_device(bp->pdev,
-				       pci_unmap_addr(cons_rx_buf, mapping),
-				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
+	dma_sync_single_for_device(&bp->pdev->dev,
+				   dma_unmap_addr(cons_rx_buf, mapping),
+				   RX_COPY_THRESH, DMA_FROM_DEVICE);
 
 	prod_rx_buf->skb = cons_rx_buf->skb;
-	pci_unmap_addr_set(prod_rx_buf, mapping,
-			   pci_unmap_addr(cons_rx_buf, mapping));
+	dma_unmap_addr_set(prod_rx_buf, mapping,
+			   dma_unmap_addr(cons_rx_buf, mapping));
 	*prod_bd = *cons_bd;
 }
 
@@ -1283,9 +1326,9 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 	/* move empty skb from pool to prod and map it */
 	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
-	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
-				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
-	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
+	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
+				 bp->rx_buf_size, DMA_FROM_DEVICE);
+	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
 
 	/* move partial skb from cons to pool (don't unmap yet) */
 	fp->tpa_pool[queue] = *cons_rx_buf;
@@ -1302,7 +1345,7 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
 
 #ifdef BNX2X_STOP_ON_ERROR
 	fp->tpa_queue_used |= (1 << queue);
-#ifdef __powerpc64__
+#ifdef _ASM_GENERIC_INT_L64_H
 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
 #else
 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
@@ -1331,8 +1374,7 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		       max(frag_size, (u32)len_on_bd));
 
 #ifdef BNX2X_STOP_ON_ERROR
-	if (pages >
-	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
+	if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
 			  pages, cqe_idx);
 		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
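Review note: several hunks swap min()/max() with explicit (u32) casts for min_t()/max_t(), the kernel's typed comparison helpers that name the comparison type once instead of casting every operand. A userspace stand-in (GNU C statement expression, mirroring how the kernel defines it):

#include <stdio.h>

#define min_t(type, x, y) ({	\
	type __x = (x);		\
	type __y = (y);		\
	__x < __y ? __x : __y; })

int main(void)
{
	unsigned long pages = 8ul * 4096 * 2;	/* 65536 */

	/* both operands compared as the named type; prints 65535 */
	printf("%u\n", min_t(unsigned int, pages, 0xffff));
	return 0;
}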
@@ -1361,8 +1403,9 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 		}
 
 		/* Unmap the page as we r going to pass it to the stack */
-		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
-			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
+		dma_unmap_page(&bp->pdev->dev,
+			       dma_unmap_addr(&old_rx_pg, mapping),
+			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
 
 		/* Add one frag and update the appropriate fields in the skb */
 		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
@@ -1389,8 +1432,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 	/* Unmap skb in the pool anyway, as we are going to change
 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
 	   fails. */
-	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
-			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
+			 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 	if (likely(new_skb)) {
 		/* fix ip xsum and give it to the stack */
@@ -1441,12 +1484,12 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 #ifdef BCM_VLAN
 			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
 			    (!is_not_hwaccel_vlan_cqe))
-				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
-						le16_to_cpu(cqe->fast_path_cqe.
-							    vlan_tag));
+				vlan_gro_receive(&fp->napi, bp->vlgrp,
+						 le16_to_cpu(cqe->fast_path_cqe.
+							     vlan_tag), skb);
 			else
 #endif
-				netif_receive_skb(skb);
+				napi_gro_receive(&fp->napi, skb);
 		} else {
 			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
 			   " - dropping packet!\n");
@@ -1620,10 +1663,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 			}
 		}
 
-		pci_dma_sync_single_for_device(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
-						       pad + RX_COPY_THRESH,
-						       PCI_DMA_FROMDEVICE);
+		dma_sync_single_for_device(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
+						   pad + RX_COPY_THRESH,
+						   DMA_FROM_DEVICE);
 		prefetch(skb);
 		prefetch(((char *)(skb)) + 128);
 
@@ -1665,10 +1708,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 
 		} else
 		if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
-			pci_unmap_single(bp->pdev,
-					pci_unmap_addr(rx_buf, mapping),
+			dma_unmap_single(&bp->pdev->dev,
+					dma_unmap_addr(rx_buf, mapping),
 					 bp->rx_buf_size,
-					 PCI_DMA_FROMDEVICE);
+					 DMA_FROM_DEVICE);
 			skb_reserve(skb, pad);
 			skb_put(skb, len);
 
@@ -1699,11 +1742,11 @@ reuse_rx:
 		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
 		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
 		     PARSING_FLAGS_VLAN))
-			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
-				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
+			vlan_gro_receive(&fp->napi, bp->vlgrp,
+				le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
 		else
 #endif
-			netif_receive_skb(skb);
+			napi_gro_receive(&fp->napi, skb);
 
 
 next_rx:
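Review note: the receive-path hunks above hand frames to the GRO entry points bound to the queue's NAPI context instead of delivering each skb individually, so the stack can coalesce consecutive TCP segments within one poll cycle. The shape of the substitution (kernel API fragment, not standalone; vlan_tag stands for the tag carried in the CQE):

/* old: one skb at a time straight into the stack */
netif_receive_skb(skb);

/* new: queue into this fastpath's NAPI context so GRO can merge */
napi_gro_receive(&fp->napi, skb);

/* VLAN-accelerated variant: the hardware-stripped tag rides along */
vlan_gro_receive(&fp->napi, bp->vlgrp, vlan_tag, skb);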
@@ -1831,8 +1874,8 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
 		return IRQ_HANDLED;
 	}
 
-	if (status)
-		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
 		   status);
 
 	return IRQ_HANDLED;
@@ -1900,6 +1943,8 @@ static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
 	int func = BP_FUNC(bp);
 	u32 hw_lock_control_reg;
 
+	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
+
 	/* Validating that the resource is within range */
 	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
 		DP(NETIF_MSG_HW,
@@ -2254,11 +2299,14 @@ static void bnx2x__link_reset(struct bnx2x *bp)
 
 static u8 bnx2x_link_test(struct bnx2x *bp)
 {
-	u8 rc;
+	u8 rc = 0;
 
-	bnx2x_acquire_phy_lock(bp);
-	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
-	bnx2x_release_phy_lock(bp);
+	if (!BP_NOMCP(bp)) {
+		bnx2x_acquire_phy_lock(bp);
+		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
+		bnx2x_release_phy_lock(bp);
+	} else
+		BNX2X_ERR("Bootcode is missing - can not test link\n");
 
 	return rc;
 }
@@ -2387,10 +2435,10 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
 		   than zero */
 		m_fair_vn.vn_credit_delta =
-			max((u32)(vn_min_rate * (T_FAIR_COEF /
-						 (8 * bp->vn_weight_sum))),
-			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
-		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
+			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
+						   (8 * bp->vn_weight_sum))),
+			      (bp->cmng.fair_vars.fair_threshold * 2));
+		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
 		   m_fair_vn.vn_credit_delta);
 	}
 
@@ -2410,6 +2458,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
 /* This function is called upon link interrupt */
 static void bnx2x_link_attn(struct bnx2x *bp)
 {
+	u32 prev_link_status = bp->link_vars.link_status;
 	/* Make sure that we are synced with the current statistics */
 	bnx2x_stats_handle(bp, STATS_EVENT_STOP);
 
@@ -2442,8 +2491,9 @@ static void bnx2x_link_attn(struct bnx2x *bp)
 		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
 	}
 
-	/* indicate link status */
-	bnx2x_link_report(bp);
+	/* indicate link status only if link status actually changed */
+	if (prev_link_status != bp->link_vars.link_status)
+		bnx2x_link_report(bp);
 
 	if (IS_E1HMF(bp)) {
 		int port = BP_PORT(bp);
@@ -2560,7 +2610,6 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
 	return rc;
 }
 
-static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
 static void bnx2x_set_rx_mode(struct net_device *dev);
 
@@ -2696,12 +2745,6 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 {
 	struct eth_spe *spe;
 
-	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
-	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
-	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
-	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
-	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
-
 #ifdef BNX2X_STOP_ON_ERROR
 	if (unlikely(bp->panic))
 		return -EIO;
@@ -2720,8 +2763,8 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 
 	/* CID needs port number to be encoded int it */
 	spe->hdr.conn_and_cmd_data =
-			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
-				     HW_CID(bp, cid)));
+			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
+				    HW_CID(bp, cid));
 	spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
 	if (common)
 		spe->hdr.type |=
@@ -2732,6 +2775,13 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 
 	bp->spq_left--;
 
+	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
+	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
+	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
+	   (u32)(U64_LO(bp->spq_mapping) +
+	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
+	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
+
 	bnx2x_sp_prod_update(bp);
 	spin_unlock_bh(&bp->spq_lock);
 	return 0;
@@ -2740,12 +2790,11 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
 /* acquire split MCP access lock register */
 static int bnx2x_acquire_alr(struct bnx2x *bp)
 {
-	u32 i, j, val;
+	u32 j, val;
 	int rc = 0;
 
 	might_sleep();
-	i = 100;
-	for (j = 0; j < i*10; j++) {
+	for (j = 0; j < 1000; j++) {
 		val = (1UL << 31);
 		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
 		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
@@ -2765,9 +2814,7 @@ static int bnx2x_acquire_alr(struct bnx2x *bp)
 /* release split MCP access lock register */
 static void bnx2x_release_alr(struct bnx2x *bp)
 {
-	u32 val = 0;
-
-	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
+	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
 }
 
 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
@@ -2823,7 +2870,7 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
 
 	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
 	   aeu_mask, asserted);
-	aeu_mask &= ~(asserted & 0xff);
+	aeu_mask &= ~(asserted & 0x3ff);
 	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
 	REG_WR(bp, aeu_addr, aeu_mask);
@@ -2910,8 +2957,9 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
 		     bp->link_params.ext_phy_config);
 
 	/* log the failure */
-	netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
-		   "Please contact Dell Support for assistance.\n");
+	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
+		   " the driver to shutdown the card to prevent permanent"
+		   " damage. Please contact OEM Support for assistance\n");
 }
 
 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -3104,10 +3152,311 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
 	}
 }
 
-static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
+static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
+
+
+#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
+#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
+#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
+#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
+#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
+#define CHIP_PARITY_SUPPORTED(bp)	(CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_done(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	val |= (1 << 16);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
+	return (val & RESET_DONE_FLAG_MASK) ? false : true;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
+{
+	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
+	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+	barrier();
+	mmiowb();
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
+{
+	u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+
+	DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
+
+	val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
+	REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
+	barrier();
+	mmiowb();
+
+	return val1;
+}
+
+/*
+ * should be run under rtnl lock
+ */
+static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
+{
+	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
+}
+
+static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
+{
+	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
+	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
+}
+
+static inline void _print_next_block(int idx, const char *blk)
+{
+	if (idx)
+		pr_cont(", ");
+	pr_cont("%s", blk);
+}
+
+static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
+				_print_next_block(par_num++, "BRB");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
+				_print_next_block(par_num++, "PARSER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "TSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
+				_print_next_block(par_num++, "SEARCHER");
+				break;
+			case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "TSEMI");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
+				_print_next_block(par_num++, "PBCLIENT");
+				break;
+			case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
+				_print_next_block(par_num++, "QM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "XSDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "XSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
+				_print_next_block(par_num++, "DOORBELLQ");
+				break;
+			case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
+				_print_next_block(par_num++, "VAUX PCI CORE");
+				break;
+			case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
+				_print_next_block(par_num++, "DEBUG");
+				break;
+			case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
+				_print_next_block(par_num++, "USDM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "USEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
+				_print_next_block(par_num++, "UPB");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
+				_print_next_block(par_num++, "CSDM");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
+				_print_next_block(par_num++, "CSEMI");
+				break;
+			case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
+				_print_next_block(par_num++, "PXP");
+				break;
+			case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
+				_print_next_block(par_num++,
+					"PXPPCICLOCKCLIENT");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
+				_print_next_block(par_num++, "CFC");
+				break;
+			case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
+				_print_next_block(par_num++, "CDU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
+				_print_next_block(par_num++, "IGU");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
+				_print_next_block(par_num++, "MISC");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
+{
+	int i = 0;
+	u32 cur_bit = 0;
+	for (i = 0; sig; i++) {
+		cur_bit = ((u32)0x1 << i);
+		if (sig & cur_bit) {
+			switch (cur_bit) {
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
+				_print_next_block(par_num++, "MCP ROM");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
+				_print_next_block(par_num++, "MCP UMP RX");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
+				_print_next_block(par_num++, "MCP UMP TX");
+				break;
+			case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
+				_print_next_block(par_num++, "MCP SCPAD");
+				break;
+			}
+
+			/* Clear the bit */
+			sig &= ~cur_bit;
+		}
+	}
+
+	return par_num;
+}
+
+static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
+				     u32 sig2, u32 sig3)
+{
+	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
+	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
+		int par_num = 0;
+		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
+			"[0]:0x%08x [1]:0x%08x "
+			"[2]:0x%08x [3]:0x%08x\n",
+			  sig0 & HW_PRTY_ASSERT_SET_0,
+			  sig1 & HW_PRTY_ASSERT_SET_1,
+			  sig2 & HW_PRTY_ASSERT_SET_2,
+			  sig3 & HW_PRTY_ASSERT_SET_3);
+		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
+		       bp->dev->name);
+		par_num = bnx2x_print_blocks_with_parity0(
+			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
+		par_num = bnx2x_print_blocks_with_parity1(
+			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
+		par_num = bnx2x_print_blocks_with_parity2(
+			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
+		par_num = bnx2x_print_blocks_with_parity3(
+			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
+		printk("\n");
+		return true;
+	} else
+		return false;
+}
+
+static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
 {
 	struct attn_route attn;
-	struct attn_route group_mask;
+	int port = BP_PORT(bp);
+
+	attn.sig[0] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
+			     port*4);
+	attn.sig[1] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
+			     port*4);
+	attn.sig[2] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
+			     port*4);
+	attn.sig[3] = REG_RD(bp,
+		MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
+			     port*4);
+
+	return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
+				 attn.sig[3]);
+}
+
+static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
+{
+	struct attn_route attn, *group_mask;
 	int port = BP_PORT(bp);
 	int index;
 	u32 reg_addr;
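Review note: the new helpers above pack two pieces of recovery state into one always-on scratch register — the low 16 bits count the functions that currently have the driver loaded, and the bit above them flags a reset in progress. A runnable sketch of the same bit-packing, with the register modeled as a plain variable (names mirror the driver's macros, the rest is illustrative):

#include <stdio.h>

#define LOAD_COUNTER_BITS	16
#define LOAD_COUNTER_MASK	(((unsigned int)1 << LOAD_COUNTER_BITS) - 1)
#define RESET_IN_PROGRESS	((unsigned int)1 << LOAD_COUNTER_BITS)

static unsigned int gen_reg;	/* stands in for the shared scratch register */

static void inc_load_cnt(void)
{
	unsigned int cnt = ((gen_reg & LOAD_COUNTER_MASK) + 1) &
			   LOAD_COUNTER_MASK;

	gen_reg = (gen_reg & ~LOAD_COUNTER_MASK) | cnt;
}

static unsigned int dec_load_cnt(void)
{
	unsigned int cnt = ((gen_reg & LOAD_COUNTER_MASK) - 1) &
			   LOAD_COUNTER_MASK;

	gen_reg = (gen_reg & ~LOAD_COUNTER_MASK) | cnt;
	return cnt;
}

int main(void)
{
	inc_load_cnt();
	inc_load_cnt();
	gen_reg |= RESET_IN_PROGRESS;	/* one function enters recovery */

	printf("load count %u, reset %s\n", gen_reg & LOAD_COUNTER_MASK,
	       (gen_reg & RESET_IN_PROGRESS) ? "in progress" : "done");
	printf("after unload: %u function(s) remain\n", dec_load_cnt());
	return 0;
}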
@@ -3118,6 +3467,19 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 	   try to handle this event */
 	bnx2x_acquire_alr(bp);
 
+	if (bnx2x_chk_parity_attn(bp)) {
+		bp->recovery_state = BNX2X_RECOVERY_INIT;
+		bnx2x_set_reset_in_progress(bp);
+		schedule_delayed_work(&bp->reset_task, 0);
+		/* Disable HW interrupts */
+		bnx2x_int_disable(bp);
+		bnx2x_release_alr(bp);
+		/* In case of parity errors don't handle attentions so that
+		 * other function would "see" parity errors.
+		 */
+		return;
+	}
+
 	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
 	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
 	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
@@ -3127,28 +3489,20 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 
 	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
 		if (deasserted & (1 << index)) {
-			group_mask = bp->attn_group[index];
+			group_mask = &bp->attn_group[index];
 
 			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
-			   index, group_mask.sig[0], group_mask.sig[1],
-			   group_mask.sig[2], group_mask.sig[3]);
+			   index, group_mask->sig[0], group_mask->sig[1],
+			   group_mask->sig[2], group_mask->sig[3]);
 
 			bnx2x_attn_int_deasserted3(bp,
-					attn.sig[3] & group_mask.sig[3]);
+					attn.sig[3] & group_mask->sig[3]);
 			bnx2x_attn_int_deasserted1(bp,
-					attn.sig[1] & group_mask.sig[1]);
+					attn.sig[1] & group_mask->sig[1]);
 			bnx2x_attn_int_deasserted2(bp,
-					attn.sig[2] & group_mask.sig[2]);
+					attn.sig[2] & group_mask->sig[2]);
 			bnx2x_attn_int_deasserted0(bp,
-					attn.sig[0] & group_mask.sig[0]);
-
-			if ((attn.sig[0] & group_mask.sig[0] &
-						HW_PRTY_ASSERT_SET_0) ||
-			    (attn.sig[1] & group_mask.sig[1] &
-						HW_PRTY_ASSERT_SET_1) ||
-			    (attn.sig[2] & group_mask.sig[2] &
-						HW_PRTY_ASSERT_SET_2))
-				BNX2X_ERR("FATAL HW block parity attention\n");
+					attn.sig[0] & group_mask->sig[0]);
 		}
 	}
 
@@ -3172,7 +3526,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
 
 	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
 	   aeu_mask, deasserted);
-	aeu_mask |= (deasserted & 0xff);
+	aeu_mask |= (deasserted & 0x3ff);
 	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
 
 	REG_WR(bp, reg_addr, aeu_mask);
@@ -3216,7 +3570,6 @@ static void bnx2x_sp_task(struct work_struct *work)
 	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
 	u16 status;
 
-
 	/* Return here if interrupt is disabled */
 	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
 		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
@@ -3227,11 +3580,23 @@ static void bnx2x_sp_task(struct work_struct *work)
 /*	if (status == 0)				     */
 /*		BNX2X_ERR("spurious slowpath interrupt!\n"); */
 
-	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
+	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
 
 	/* HW attentions */
-	if (status & 0x1)
+	if (status & 0x1) {
 		bnx2x_attn_int(bp);
+		status &= ~0x1;
+	}
+
+	/* CStorm events: STAT_QUERY */
+	if (status & 0x2) {
+		DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
+		status &= ~0x2;
+	}
+
+	if (unlikely(status))
+		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
+		   status);
 
 	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
 		     IGU_INT_NOP, 1);
@@ -3243,7 +3608,6 @@ static void bnx2x_sp_task(struct work_struct *work)
 		     IGU_INT_NOP, 1);
 	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
 		     IGU_INT_ENABLE, 1);
-
 }
 
 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
@@ -3947,7 +4311,6 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 		u32 lo;
 		u32 hi;
 	} diff;
-	u32 nig_timer_max;
 
 	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
 		bnx2x_bmac_stats_update(bp);
@@ -3978,10 +4341,14 @@ static int bnx2x_hw_stats_update(struct bnx2x *bp)
 
 	pstats->host_port_stats_start = ++pstats->host_port_stats_end;
 
-	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
-	if (nig_timer_max != estats->nig_timer_max) {
-		estats->nig_timer_max = nig_timer_max;
-		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
+	if (!BP_NOMCP(bp)) {
+		u32 nig_timer_max =
+			SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
+		if (nig_timer_max != estats->nig_timer_max) {
+			estats->nig_timer_max = nig_timer_max;
+			BNX2X_ERR("NIG timer max (%u)\n",
+				  estats->nig_timer_max);
+		}
 	}
 
 	return 0;
@@ -4025,21 +4392,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
 		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
 							bp->stats_counter) {
 			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
-			   " xstorm counter (%d) != stats_counter (%d)\n",
+			   " xstorm counter (0x%x) != stats_counter (0x%x)\n",
 			   i, xclient->stats_counter, bp->stats_counter);
 			return -1;
 		}
 		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
 							bp->stats_counter) {
 			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
-			   " tstorm counter (%d) != stats_counter (%d)\n",
+			   " tstorm counter (0x%x) != stats_counter (0x%x)\n",
 			   i, tclient->stats_counter, bp->stats_counter);
 			return -2;
 		}
 		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
 							bp->stats_counter) {
 			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
-			   " ustorm counter (%d) != stats_counter (%d)\n",
+			   " ustorm counter (0x%x) != stats_counter (0x%x)\n",
 			   i, uclient->stats_counter, bp->stats_counter);
 			return -4;
 		}
@@ -4059,6 +4426,21 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
 		       qstats->total_bytes_received_lo,
 		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));
 
+		SUB_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
+
+		SUB_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
+
+		SUB_64(qstats->total_bytes_received_hi,
+		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
+		       qstats->total_bytes_received_lo,
+		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
+
 		qstats->valid_bytes_received_hi =
 					qstats->total_bytes_received_hi;
 		qstats->valid_bytes_received_lo =
@@ -4307,47 +4689,43 @@ static void bnx2x_stats_update(struct bnx2x *bp)
 	bnx2x_drv_stats_update(bp);
 
 	if (netif_msg_timer(bp)) {
-		struct bnx2x_fastpath *fp0_rx = bp->fp;
-		struct bnx2x_fastpath *fp0_tx = bp->fp;
-		struct tstorm_per_client_stats *old_tclient =
-							&bp->fp->old_tclient;
-		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
 		struct bnx2x_eth_stats *estats = &bp->eth_stats;
-		struct net_device_stats *nstats = &bp->dev->stats;
 		int i;
 
-		netdev_printk(KERN_DEBUG, bp->dev, "\n");
-		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
-		       " tx pkt (%lx)\n",
-		       bnx2x_tx_avail(fp0_tx),
-		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
-		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
-		       " rx pkt (%lx)\n",
-		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
-		       fp0_rx->rx_comp_cons),
-		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
-		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
-		       "brb truncate %u\n",
-		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
-		       qstats->driver_xoff,
+		printk(KERN_DEBUG "%s: brb drops %u brb truncate %u\n",
+		       bp->dev->name,
 		       estats->brb_drop_lo, estats->brb_truncate_lo);
-		printk(KERN_DEBUG "tstats: checksum_discard %u "
-		       "packets_too_big_discard %lu no_buff_discard %lu "
-		       "mac_discard %u mac_filter_discard %u "
-		       "xxovrflow_discard %u brb_truncate_discard %u "
-		       "ttl0_discard %u\n",
-		       le32_to_cpu(old_tclient->checksum_discard),
-		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
-		       bnx2x_hilo(&qstats->no_buff_discard_hi),
-		       estats->mac_discard, estats->mac_filter_discard,
-		       estats->xxoverflow_discard, estats->brb_truncate_discard,
-		       le32_to_cpu(old_tclient->ttl0_discard));
 
 		for_each_queue(bp, i) {
-			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
-			       bnx2x_fp(bp, i, tx_pkt),
-			       bnx2x_fp(bp, i, rx_pkt),
-			       bnx2x_fp(bp, i, rx_calls));
+			struct bnx2x_fastpath *fp = &bp->fp[i];
+			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+
+			printk(KERN_DEBUG "%s: rx usage(%4u) *rx_cons_sb(%u)"
+			       " rx pkt(%lu) rx calls(%lu %lu)\n",
+			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
+			       fp->rx_comp_cons),
+			       le16_to_cpu(*fp->rx_cons_sb),
+			       bnx2x_hilo(&qstats->
+					  total_unicast_packets_received_hi),
+			       fp->rx_calls, fp->rx_pkt);
+		}
+
+		for_each_queue(bp, i) {
+			struct bnx2x_fastpath *fp = &bp->fp[i];
+			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
+			struct netdev_queue *txq =
+				netdev_get_tx_queue(bp->dev, i);
+
+			printk(KERN_DEBUG "%s: tx avail(%4u) *tx_cons_sb(%u)"
+			       " tx pkt(%lu) tx calls (%lu)"
+			       " %s (Xoff events %u)\n",
+			       fp->name, bnx2x_tx_avail(fp),
+			       le16_to_cpu(*fp->tx_cons_sb),
+			       bnx2x_hilo(&qstats->
+					  total_unicast_packets_transmitted_hi),
+			       fp->tx_pkt,
+			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
+			       qstats->driver_xoff);
 		}
 	}
 
@@ -4468,6 +4846,9 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
 {
 	enum bnx2x_stats_state state = bp->stats_state;
 
+	if (unlikely(bp->panic))
+		return;
+
 	bnx2x_stats_stm[state][event].action(bp);
 	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
 
@@ -4940,9 +5321,9 @@ static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
 		}
 
 		if (fp->tpa_state[i] == BNX2X_TPA_START)
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 		dev_kfree_skb(skb);
 		rx_buf->skb = NULL;
@@ -4978,7 +5359,7 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 					fp->disable_tpa = 1;
 					break;
 				}
-				pci_unmap_addr_set((struct sw_rx_bd *)
+				dma_unmap_addr_set((struct sw_rx_bd *)
 							&bp->fp->tpa_pool[i],
 						   mapping, 0);
 				fp->tpa_state[i] = BNX2X_TPA_STOP;
@@ -5072,8 +5453,8 @@ static void bnx2x_init_rx_rings(struct bnx2x *bp)
 
 		fp->rx_bd_prod = ring_prod;
 		/* must not have more available CQEs than BDs */
-		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
-				       cqe_ring_prod);
+		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
+					 cqe_ring_prod);
 		fp->rx_pkt = fp->rx_calls = 0;
 
 		/* Warning!
@@ -5179,8 +5560,8 @@ static void bnx2x_init_context(struct bnx2x *bp)
 			context->ustorm_st_context.common.flags |=
 				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
 			context->ustorm_st_context.common.sge_buff_size =
-				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
-					 (u32)0xffff);
+				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
+					   0xffff);
 			context->ustorm_st_context.common.sge_page_base_hi =
 						U64_HI(fp->rx_sge_mapping);
 			context->ustorm_st_context.common.sge_page_base_lo =
@@ -5477,10 +5858,8 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	}
 
 	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
-	max_agg_size =
-		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
-			  SGE_PAGE_SIZE * PAGES_PER_SGE),
-		    (u32)0xffff);
+	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
+				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
 	for_each_queue(bp, i) {
 		struct bnx2x_fastpath *fp = &bp->fp[i];
 
@@ -5566,7 +5945,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
 	}
 
 
-	/* Store it to internal memory */
+	/* Store cmng structures to internal memory */
 	if (bp->port.pmf)
 		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
 			REG_WR(bp, BAR_XSTRORM_INTMEM +
@@ -5658,8 +6037,8 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
 
 static int bnx2x_gunzip_init(struct bnx2x *bp)
 {
-	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
-					      &bp->gunzip_mapping);
+	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
+					    &bp->gunzip_mapping, GFP_KERNEL);
 	if (bp->gunzip_buf == NULL)
 		goto gunzip_nomem1;
 
@@ -5679,12 +6058,13 @@ gunzip_nomem3:
 	bp->strm = NULL;
 
 gunzip_nomem2:
-	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
-			    bp->gunzip_mapping);
+	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+			  bp->gunzip_mapping);
 	bp->gunzip_buf = NULL;
 
 gunzip_nomem1:
-	netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
+	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
+		   " un-compression\n");
 	return -ENOMEM;
 }
 
@@ -5696,8 +6076,8 @@ static void bnx2x_gunzip_end(struct bnx2x *bp)
 	bp->strm = NULL;
 
 	if (bp->gunzip_buf) {
-		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
-				    bp->gunzip_mapping);
+		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
+				  bp->gunzip_mapping);
 		bp->gunzip_buf = NULL;
 	}
 }
@@ -5735,8 +6115,9 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
 
 	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
 	if (bp->gunzip_outlen & 0x3)
-		netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
-			   bp->gunzip_outlen);
+		netdev_err(bp->dev, "Firmware decompression error:"
+			   " gunzip_outlen (%d) not aligned\n",
+			   bp->gunzip_outlen);
 	bp->gunzip_outlen >>= 2;
 
 	zlib_inflateEnd(bp->strm);
@@ -5962,6 +6343,50 @@ static void enable_blocks_attention(struct bnx2x *bp)
 	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
 }
 
+static const struct {
+	u32 addr;
+	u32 mask;
+} bnx2x_parity_mask[] = {
+	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
+	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
+	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
+	{HC_REG_HC_PRTY_MASK, 0xffffffff},
+	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
+	{QM_REG_QM_PRTY_MASK, 0x0},
+	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
+	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
+	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
+	{SRC_REG_SRC_PRTY_MASK, 0x4},		/* bit 2 */
+	{CDU_REG_CDU_PRTY_MASK, 0x0},
+	{CFC_REG_CFC_PRTY_MASK, 0x0},
+	{DBG_REG_DBG_PRTY_MASK, 0x0},
+	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
+	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
+	{PRS_REG_PRS_PRTY_MASK, (1<<6)},	/* bit 6 */
+	{TSDM_REG_TSDM_PRTY_MASK, 0x18},	/* bit 3,4 */
+	{CSDM_REG_CSDM_PRTY_MASK, 0x8},		/* bit 3 */
+	{USDM_REG_USDM_PRTY_MASK, 0x38},	/* bit 3,4,5 */
+	{XSDM_REG_XSDM_PRTY_MASK, 0x8},		/* bit 3 */
+	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
+	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
+	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
+	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
+	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
+	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
+	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
+	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
+};
+
+static void enable_blocks_parity(struct bnx2x *bp)
+{
+	int i, mask_arr_len =
+		sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));
+
+	for (i = 0; i < mask_arr_len; i++)
+		REG_WR(bp, bnx2x_parity_mask[i].addr,
+			bnx2x_parity_mask[i].mask);
+}
+
 
 static void bnx2x_reset_common(struct bnx2x *bp)
 {
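Review note: enable_blocks_parity() above is plain table-driven register programming — walk a constant address/mask table and write each entry. The same pattern in a runnable sketch (addresses invented, REG_WR stubbed with a printf):

#include <stddef.h>
#include <stdio.h>

static const struct {
	unsigned int addr;
	unsigned int mask;
} parity_mask[] = {
	{ 0x1000, 0xffffffff },	/* block fully masked */
	{ 0x2000, 0x0 },	/* block fully enabled */
	{ 0x3000, 0x4 },	/* only bit 2 masked */
};

static void reg_wr(unsigned int addr, unsigned int val)
{
	printf("REG_WR(0x%04x) <- 0x%08x\n", addr, val);	/* stub */
}

int main(void)
{
	size_t i;

	for (i = 0; i < sizeof(parity_mask) / sizeof(parity_mask[0]); i++)
		reg_wr(parity_mask[i].addr, parity_mask[i].mask);
	return 0;
}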
@@ -5992,10 +6417,14 @@ static void bnx2x_init_pxp(struct bnx2x *bp)
 
 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
 {
+	int is_required;
 	u32 val;
-	u8 port;
-	u8 is_required = 0;
+	int port;
+
+	if (BP_NOMCP(bp))
+		return;
 
+	is_required = 0;
 	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
 	      SHARED_HW_CFG_FAN_FAILURE_MASK;
 
@@ -6034,7 +6463,7 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
 	/* set to active low mode */
 	val = REG_RD(bp, MISC_REG_SPIO_INT);
 	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
-					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
+			MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
 	REG_WR(bp, MISC_REG_SPIO_INT, val);
 
 	/* enable interrupt to signal the IGU */
@@ -6221,7 +6650,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
 
 	if (sizeof(union cdu_context) != 1024)
 		/* we currently assume that a context is 1024 bytes */
-		pr_alert("please adjust the size of cdu_context(%ld)\n",
+		dev_alert(&bp->pdev->dev, "please adjust the size "
+			  "of cdu_context(%ld)\n",
 			 (long)sizeof(union cdu_context));
 
 	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
@@ -6305,6 +6735,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
 	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
 
 	enable_blocks_attention(bp);
+	if (CHIP_PARITY_SUPPORTED(bp))
+		enable_blocks_parity(bp);
 
 	if (!BP_NOMCP(bp)) {
 		bnx2x_acquire_phy_lock(bp);
@@ -6323,7 +6755,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	u32 low, high;
 	u32 val;
 
-	DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
+	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
 
 	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
 
@@ -6342,6 +6774,7 @@ static int bnx2x_init_port(struct bnx2x *bp)
 	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
 	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
 #endif
+
 	bnx2x_init_block(bp, DQ_BLOCK, init_stage);
 
 	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
@@ -6534,7 +6967,7 @@ static int bnx2x_init_func(struct bnx2x *bp)
 	u32 addr, val;
 	int i;
 
-	DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
+	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
 
 	/* set MSI reconfigure capability */
 	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
@@ -6692,7 +7125,7 @@ static void bnx2x_free_mem(struct bnx2x *bp)
 #define BNX2X_PCI_FREE(x, y, size) \
 	do { \
 		if (x) { \
-			pci_free_consistent(bp->pdev, size, x, y); \
+			dma_free_coherent(&bp->pdev->dev, size, x, y); \
 			x = NULL; \
 			y = 0; \
 		} \
@@ -6773,7 +7206,7 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
 
 #define BNX2X_PCI_ALLOC(x, y, size) \
 	do { \
-		x = pci_alloc_consistent(bp->pdev, size, y); \
+		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
 		if (x == NULL) \
 			goto alloc_mem_err; \
 		memset(x, 0, size); \
@@ -6906,9 +7339,9 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
 			if (skb == NULL)
 				continue;
 
-			pci_unmap_single(bp->pdev,
-					 pci_unmap_addr(rx_buf, mapping),
-					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
+			dma_unmap_single(&bp->pdev->dev,
+					 dma_unmap_addr(rx_buf, mapping),
+					 bp->rx_buf_size, DMA_FROM_DEVICE);
 
 			rx_buf->skb = NULL;
 			dev_kfree_skb(skb);
@@ -6987,7 +7420,31 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
 
 	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
 			     BNX2X_NUM_QUEUES(bp) + offset);
-	if (rc) {
+
+	/*
+	 * reconfigure number of tx/rx queues according to available
+	 * MSI-X vectors
+	 */
+	if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
+		/* vectors available for FP */
+		int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
+
+		DP(NETIF_MSG_IFUP,
+		   "Trying to use less MSI-X vectors: %d\n", rc);
+
+		rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
+
+		if (rc) {
+			DP(NETIF_MSG_IFUP,
+			   "MSI-X is not attainable rc %d\n", rc);
+			return rc;
+		}
+
+		bp->num_queues = min(bp->num_queues, fp_vec);
+
+		DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7446 bp->num_queues);
7447 } else if (rc) {
6991 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc); 7448 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6992 return rc; 7449 return rc;
6993 } 7450 }
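
[Editor's note] The retry above relies on pci_enable_msix() in this kernel returning 0 on success, a negative errno on failure, or a positive count of vectors the platform could actually provide. The same pattern in general form (a sketch; the helper name and parameters are illustrative, not driver API):

	static int enable_msix_with_fallback(struct pci_dev *pdev,
					     struct msix_entry *table,
					     int requested, int minimum)
	{
		int rc = pci_enable_msix(pdev, table, requested);

		if (rc == 0)
			return requested;
		/* rc > 0: request failed, but 'rc' vectors are available */
		if (rc >= minimum && pci_enable_msix(pdev, table, rc) == 0)
			return rc;	/* caller trims its queue count to fit */
		return rc < 0 ? rc : -ENOSPC;
	}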
@@ -7028,10 +7485,11 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7028 } 7485 }
7029 7486
7030 i = BNX2X_NUM_QUEUES(bp); 7487 i = BNX2X_NUM_QUEUES(bp);
7031 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n", 7488 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7032 bp->msix_table[0].vector, 7489 " ... fp[%d] %d\n",
7033 0, bp->msix_table[offset].vector, 7490 bp->msix_table[0].vector,
7034 i - 1, bp->msix_table[offset + i - 1].vector); 7491 0, bp->msix_table[offset].vector,
7492 i - 1, bp->msix_table[offset + i - 1].vector);
7035 7493
7036 return 0; 7494 return 0;
7037} 7495}
@@ -7409,8 +7867,6 @@ static int bnx2x_set_num_queues(struct bnx2x *bp)
7409 bp->num_queues = 1; 7867 bp->num_queues = 1;
7410 DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); 7868 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7411 break; 7869 break;
7412
7413 case INT_MODE_MSIX:
7414 default: 7870 default:
7415 /* Set number of queues according to bp->multi_mode value */ 7871 /* Set number of queues according to bp->multi_mode value */
7416 bnx2x_set_num_queues_msix(bp); 7872 bnx2x_set_num_queues_msix(bp);
@@ -7656,6 +8112,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7656 if (bp->state == BNX2X_STATE_OPEN) 8112 if (bp->state == BNX2X_STATE_OPEN)
7657 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD); 8113 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7658#endif 8114#endif
8115 bnx2x_inc_load_cnt(bp);
7659 8116
7660 return 0; 8117 return 0;
7661 8118
@@ -7843,33 +8300,12 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7843 } 8300 }
7844} 8301}
7845 8302
7846/* must be called with rtnl_lock */ 8303static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
7847static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7848{ 8304{
7849 int port = BP_PORT(bp); 8305 int port = BP_PORT(bp);
7850 u32 reset_code = 0; 8306 u32 reset_code = 0;
7851 int i, cnt, rc; 8307 int i, cnt, rc;
7852 8308
7853#ifdef BCM_CNIC
7854 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7855#endif
7856 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7857
7858 /* Set "drop all" */
7859 bp->rx_mode = BNX2X_RX_MODE_NONE;
7860 bnx2x_set_storm_rx_mode(bp);
7861
7862 /* Disable HW interrupts, NAPI and Tx */
7863 bnx2x_netif_stop(bp, 1);
7864
7865 del_timer_sync(&bp->timer);
7866 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7867 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7868 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7869
7870 /* Release IRQs */
7871 bnx2x_free_irq(bp, false);
7872
7873 /* Wait until tx fastpath tasks complete */ 8309 /* Wait until tx fastpath tasks complete */
7874 for_each_queue(bp, i) { 8310 for_each_queue(bp, i) {
7875 struct bnx2x_fastpath *fp = &bp->fp[i]; 8311 struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -8010,6 +8446,69 @@ unload_error:
8010 if (!BP_NOMCP(bp)) 8446 if (!BP_NOMCP(bp))
8011 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE); 8447 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8012 8448
8449}
8450
8451static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8452{
8453 u32 val;
8454
8455 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8456
8457 if (CHIP_IS_E1(bp)) {
8458 int port = BP_PORT(bp);
8459 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8460 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8461
8462 val = REG_RD(bp, addr);
8463 val &= ~(0x300);
8464 REG_WR(bp, addr, val);
8465 } else if (CHIP_IS_E1H(bp)) {
8466 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8467 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8468 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8469 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8470 }
8471}
8472
8473/* must be called with rtnl_lock */
8474static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8475{
8476 int i;
8477
8478 if (bp->state == BNX2X_STATE_CLOSED) {
8479 /* Interface has been removed - nothing to recover */
8480 bp->recovery_state = BNX2X_RECOVERY_DONE;
8481 bp->is_leader = 0;
8482 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8483 smp_wmb();
8484
8485 return -EINVAL;
8486 }
8487
8488#ifdef BCM_CNIC
8489 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8490#endif
8491 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8492
8493 /* Set "drop all" */
8494 bp->rx_mode = BNX2X_RX_MODE_NONE;
8495 bnx2x_set_storm_rx_mode(bp);
8496
8497 /* Disable HW interrupts, NAPI and Tx */
8498 bnx2x_netif_stop(bp, 1);
8499
8500 del_timer_sync(&bp->timer);
8501 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8502 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8503 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8504
8505 /* Release IRQs */
8506 bnx2x_free_irq(bp, false);
8507
8508 /* Cleanup the chip if needed */
8509 if (unload_mode != UNLOAD_RECOVERY)
8510 bnx2x_chip_cleanup(bp, unload_mode);
8511
8013 bp->port.pmf = 0; 8512 bp->port.pmf = 0;
8014 8513
8015 /* Free SKBs, SGEs, TPA pool and driver internals */ 8514 /* Free SKBs, SGEs, TPA pool and driver internals */
@@ -8024,17 +8523,448 @@ unload_error:
8024 8523
8025 netif_carrier_off(bp->dev); 8524 netif_carrier_off(bp->dev);
8026 8525
8526 /* The last driver to unload must disable the "close the gate"
8527 * functionality if there is no parity attention or "process kill" pending.
8528 */
8529 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8530 bnx2x_reset_is_done(bp))
8531 bnx2x_disable_close_the_gate(bp);
8532
8533 /* Reset MCP mailbox sequence if there is an ongoing recovery */
8534 if (unload_mode == UNLOAD_RECOVERY)
8535 bp->fw_seq = 0;
8536
8027 return 0; 8537 return 0;
8028} 8538}
8029 8539
8540/* Close gates #2, #3 and #4: */
8541static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8542{
8543 u32 val, addr;
8544
8545 /* Gates #2 and #4a are closed/opened for "not E1" only */
8546 if (!CHIP_IS_E1(bp)) {
8547 /* #4 */
8548 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8549 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8550 close ? (val | 0x1) : (val & (~(u32)1)));
8551 /* #2 */
8552 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8553 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8554 close ? (val | 0x1) : (val & (~(u32)1)));
8555 }
8556
8557 /* #3 */
8558 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8559 val = REG_RD(bp, addr);
8560 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8561
8562 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8563 close ? "closing" : "opening");
8564 mmiowb();
8565}
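
[Editor's note] Each gate in bnx2x_set_234_gates() is bit 0 of its register, toggled by the same conditional read-modify-write; note gate #3 (HC_REG_CONFIG_x) uses the inverted sense - its bit is set when the gate is open. The common step as a helper (a sketch; the name is illustrative):

	/* set (on) or clear bit 0 of the register at 'addr' */
	static void bnx2x_write_gate_bit(struct bnx2x *bp, u32 addr, bool on)
	{
		u32 val = REG_RD(bp, addr);

		REG_WR(bp, addr, on ? (val | 0x1) : (val & ~(u32)0x1));
	}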
8566
8567#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8568
8569static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8570{
8571 /* Do some magic... */
8572 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8573 *magic_val = val & SHARED_MF_CLP_MAGIC;
8574 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8575}
8576
8577/* Restore the value of the `magic' bit.
8578 *
8579 * @param bp Device handle.
8580 * @param magic_val Old value of the `magic' bit.
8581 */
8582static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8583{
8584 /* Restore the `magic' bit value... */
8585 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8586 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8587 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8588 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8589 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8590 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8591}
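
[Editor's note] bnx2x_clp_reset_prep()/bnx2x_clp_reset_done() form a save-set/restore pair around the MCP reset: remember the old state of one bit, force it on, and later put the old state back without disturbing the other bits. The underlying bit manipulation, reduced to plain C (a sketch, detached from the MF_CFG accessors; helper names are illustrative):

	/* remember the old state of 'bit' in *saved, return val with it set */
	static u32 bit_save_and_set(u32 val, u32 bit, u32 *saved)
	{
		*saved = val & bit;
		return val | bit;
	}

	/* return val with 'bit' restored to its saved state */
	static u32 bit_restore(u32 val, u32 bit, u32 saved)
	{
		return (val & ~bit) | saved;
	}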
8592
8593/* Prepares for MCP reset: takes care of CLP configurations.
8594 *
8595 * @param bp
8596 * @param magic_val Old value of 'magic' bit.
8597 */
8598static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8599{
8600 u32 shmem;
8601 u32 validity_offset;
8602
8603 DP(NETIF_MSG_HW, "Starting\n");
8604
8605 /* Set `magic' bit in order to save MF config */
8606 if (!CHIP_IS_E1(bp))
8607 bnx2x_clp_reset_prep(bp, magic_val);
8608
8609 /* Get shmem offset */
8610 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8611 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8612
8613 /* Clear validity map flags */
8614 if (shmem > 0)
8615 REG_WR(bp, shmem + validity_offset, 0);
8616}
8617
8618#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8619#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8620
8621/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8622 * depending on the HW type.
8623 *
8624 * @param bp
8625 */
8626static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8627{
8628 /* special handling for emulation and FPGA:
8629 * wait 10 times longer */
8630 if (CHIP_REV_IS_SLOW(bp))
8631 msleep(MCP_ONE_TIMEOUT*10);
8632 else
8633 msleep(MCP_ONE_TIMEOUT);
8634}
8635
8636static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8637{
8638 u32 shmem, cnt, validity_offset, val;
8639 int rc = 0;
8640
8641 msleep(100);
8642
8643 /* Get shmem offset */
8644 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8645 if (shmem == 0) {
8646 BNX2X_ERR("Shmem 0 return failure\n");
8647 rc = -ENOTTY;
8648 goto exit_lbl;
8649 }
8650
8651 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8652
8653 /* Wait for MCP to come up */
8654 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8655 /* TBD: it's best to check the validity map of the last port.
8656 * Currently checks on port 0.
8657 */
8658 val = REG_RD(bp, shmem + validity_offset);
8659 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8660 shmem + validity_offset, val);
8661
8662 /* check that shared memory is valid. */
8663 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8664 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8665 break;
8666
8667 bnx2x_mcp_wait_one(bp);
8668 }
8669
8670 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8671
8672 /* Check that shared memory is valid. This indicates that MCP is up. */
8673 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8674 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8675 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8676 rc = -ENOTTY;
8677 goto exit_lbl;
8678 }
8679
8680exit_lbl:
8681 /* Restore the `magic' bit value */
8682 if (!CHIP_IS_E1(bp))
8683 bnx2x_clp_reset_done(bp, magic_val);
8684
8685 return rc;
8686}
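
[Editor's note] bnx2x_reset_mcp_comp() is an instance of the usual poll-until-condition-or-timeout loop (MCP_TIMEOUT total, MCP_ONE_TIMEOUT per step). Its skeleton, as a hedged sketch with an illustrative predicate parameter:

	/* returns 0 once cond(bp) holds, -ETIMEDOUT after 'total' ms */
	static int bnx2x_poll_until(struct bnx2x *bp,
				    bool (*cond)(struct bnx2x *),
				    unsigned int total, unsigned int step)
	{
		unsigned int waited;

		for (waited = 0; waited < total; waited += step) {
			if (cond(bp))
				return 0;
			msleep(step);
		}
		return cond(bp) ? 0 : -ETIMEDOUT;
	}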
8687
8688static void bnx2x_pxp_prep(struct bnx2x *bp)
8689{
8690 if (!CHIP_IS_E1(bp)) {
8691 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8692 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8693 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8694 mmiowb();
8695 }
8696}
8697
8698/*
8699 * Reset the whole chip except for:
8700 * - PCIE core
8701 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8702 * one reset bit)
8703 * - IGU
8704 * - MISC (including AEU)
8705 * - GRC
8706 * - RBCN, RBCP
8707 */
8708static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8709{
8710 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8711
8712 not_reset_mask1 =
8713 MISC_REGISTERS_RESET_REG_1_RST_HC |
8714 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8715 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8716
8717 not_reset_mask2 =
8718 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8719 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8720 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8721 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8722 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8723 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8724 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8725 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8726
8727 reset_mask1 = 0xffffffff;
8728
8729 if (CHIP_IS_E1(bp))
8730 reset_mask2 = 0xffff;
8731 else
8732 reset_mask2 = 0x1ffff;
8733
8734 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8735 reset_mask1 & (~not_reset_mask1));
8736 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8737 reset_mask2 & (~not_reset_mask2));
8738
8739 barrier();
8740 mmiowb();
8741
8742 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8743 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8744 mmiowb();
8745}
8746
8747static int bnx2x_process_kill(struct bnx2x *bp)
8748{
8749 int cnt = 1000;
8750 u32 val = 0;
8751 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8752
8753
8754 /* Empty the Tetris buffer, wait for 1s */
8755 do {
8756 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8757 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8758 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8759 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8760 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8761 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8762 ((port_is_idle_0 & 0x1) == 0x1) &&
8763 ((port_is_idle_1 & 0x1) == 0x1) &&
8764 (pgl_exp_rom2 == 0xffffffff))
8765 break;
8766 msleep(1);
8767 } while (cnt-- > 0);
8768
8769 if (cnt <= 0) {
8770 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
8771 " are still"
8772 " outstanding read requests after 1s!\n");
8773 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8774 " port_is_idle_0=0x%08x,"
8775 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8776 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8777 pgl_exp_rom2);
8778 return -EAGAIN;
8779 }
8780
8781 barrier();
8782
8783 /* Close gates #2, #3 and #4 */
8784 bnx2x_set_234_gates(bp, true);
8785
8786 /* TBD: Indicate that "process kill" is in progress to MCP */
8787
8788 /* Clear "unprepared" bit */
8789 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8790 barrier();
8791
8792 /* Make sure all is written to the chip before the reset */
8793 mmiowb();
8794
8795 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8796 * PSWHST, GRC and PSWRD Tetris buffer.
8797 */
8798 msleep(1);
8799
8800 /* Prepare for chip reset: */
8801 /* MCP */
8802 bnx2x_reset_mcp_prep(bp, &val);
8803
8804 /* PXP */
8805 bnx2x_pxp_prep(bp);
8806 barrier();
8807
8808 /* reset the chip */
8809 bnx2x_process_kill_chip_reset(bp);
8810 barrier();
8811
8812 /* Recover after reset: */
8813 /* MCP */
8814 if (bnx2x_reset_mcp_comp(bp, val))
8815 return -EAGAIN;
8816
8817 /* PXP */
8818 bnx2x_pxp_prep(bp);
8819
8820 /* Open the gates #2, #3 and #4 */
8821 bnx2x_set_234_gates(bp, false);
8822
8823 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
8824 * reset state, re-enable attentions. */
8825
8826 return 0;
8827}
8828
8829static int bnx2x_leader_reset(struct bnx2x *bp)
8830{
8831 int rc = 0;
8832 /* Try to recover after the failure */
8833 if (bnx2x_process_kill(bp)) {
8834 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8835 bp->dev->name);
8836 rc = -EAGAIN;
8837 goto exit_leader_reset;
8838 }
8839
8840 /* Clear "reset is in progress" bit and update the driver state */
8841 bnx2x_set_reset_done(bp);
8842 bp->recovery_state = BNX2X_RECOVERY_DONE;
8843
8844exit_leader_reset:
8845 bp->is_leader = 0;
8846 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8847 smp_wmb();
8848 return rc;
8849}
8850
8851static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8852
8853/* Assumption: runs under rtnl lock. This together with the fact
8854 * that it's called only from bnx2x_reset_task() ensure that it
8855 * will never be called when netif_running(bp->dev) is false.
8856 */
8857static void bnx2x_parity_recover(struct bnx2x *bp)
8858{
8859 DP(NETIF_MSG_HW, "Handling parity\n");
8860 while (1) {
8861 switch (bp->recovery_state) {
8862 case BNX2X_RECOVERY_INIT:
8863 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8864 /* Try to get a LEADER_LOCK HW lock */
8865 if (bnx2x_trylock_hw_lock(bp,
8866 HW_LOCK_RESOURCE_RESERVED_08))
8867 bp->is_leader = 1;
8868
8869 /* Stop the driver */
8870 /* If interface has been removed - break */
8871 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8872 return;
8873
8874 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8875 /* Ensure "is_leader" and "recovery_state"
8876 * update values are seen on other CPUs
8877 */
8878 smp_wmb();
8879 break;
8880
8881 case BNX2X_RECOVERY_WAIT:
8882 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8883 if (bp->is_leader) {
8884 u32 load_counter = bnx2x_get_load_cnt(bp);
8885 if (load_counter) {
8886 /* Wait until all other functions are
8887 * down.
8888 */
8889 schedule_delayed_work(&bp->reset_task,
8890 HZ/10);
8891 return;
8892 } else {
8893 /* If all other functions are down -
8894 * try to bring the chip back to
8895 * normal. Either way, this is the
8896 * exit point for the leader.
8897 */
8898 if (bnx2x_leader_reset(bp) ||
8899 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8900 printk(KERN_ERR"%s: Recovery "
8901 "has failed. Power cycle is "
8902 "needed.\n", bp->dev->name);
8903 /* Disconnect this device */
8904 netif_device_detach(bp->dev);
8905 /* Block ifup for all functions
8906 * of this ASIC until
8907 * "process kill" or power
8908 * cycle.
8909 */
8910 bnx2x_set_reset_in_progress(bp);
8911 /* Shut down the power */
8912 bnx2x_set_power_state(bp,
8913 PCI_D3hot);
8914 return;
8915 }
8916
8917 return;
8918 }
8919 } else { /* non-leader */
8920 if (!bnx2x_reset_is_done(bp)) {
8921 /* Try to get the LEADER_LOCK HW lock,
8922 * since a former leader may have
8923 * been unloaded by the user or
8924 * released leadership for another
8925 * reason.
8926 */
8927 if (bnx2x_trylock_hw_lock(bp,
8928 HW_LOCK_RESOURCE_RESERVED_08)) {
8929 /* I'm the leader now! Restart the
8930 * switch case.
8931 */
8932 bp->is_leader = 1;
8933 break;
8934 }
8935
8936 schedule_delayed_work(&bp->reset_task,
8937 HZ/10);
8938 return;
8939
8940 } else { /* A leader has completed
8941 * the "process kill". It's an exit
8942 * point for a non-leader.
8943 */
8944 bnx2x_nic_load(bp, LOAD_NORMAL);
8945 bp->recovery_state =
8946 BNX2X_RECOVERY_DONE;
8947 smp_wmb();
8948 return;
8949 }
8950 }
8951 default:
8952 return;
8953 }
8954 }
8955}
8956
8957/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8958 * scheduled on a general queue in order to prevent a deadlock.
8959 */
8030static void bnx2x_reset_task(struct work_struct *work) 8960static void bnx2x_reset_task(struct work_struct *work)
8031{ 8961{
8032 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task); 8962 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8033 8963
8034#ifdef BNX2X_STOP_ON_ERROR 8964#ifdef BNX2X_STOP_ON_ERROR
8035 BNX2X_ERR("reset task called but STOP_ON_ERROR defined" 8965 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8036 " so reset not done to allow debug dump,\n" 8966 " so reset not done to allow debug dump,\n"
8037 " you will need to reboot when done\n"); 8967 KERN_ERR " you will need to reboot when done\n");
8038 return; 8968 return;
8039#endif 8969#endif
8040 8970
@@ -8043,8 +8973,12 @@ static void bnx2x_reset_task(struct work_struct *work)
8043 if (!netif_running(bp->dev)) 8973 if (!netif_running(bp->dev))
8044 goto reset_task_exit; 8974 goto reset_task_exit;
8045 8975
8046 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 8976 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8047 bnx2x_nic_load(bp, LOAD_NORMAL); 8977 bnx2x_parity_recover(bp);
8978 else {
8979 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8980 bnx2x_nic_load(bp, LOAD_NORMAL);
8981 }
8048 8982
8049reset_task_exit: 8983reset_task_exit:
8050 rtnl_unlock(); 8984 rtnl_unlock();
@@ -8264,7 +9198,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8264 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]); 9198 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8265 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 9199 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8266 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) 9200 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8267 BNX2X_ERR("BAD MCP validity signature\n"); 9201 BNX2X_ERROR("BAD MCP validity signature\n");
8268 9202
8269 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config); 9203 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
8270 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config); 9204 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
@@ -8288,8 +9222,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8288 if (val < BNX2X_BC_VER) { 9222 if (val < BNX2X_BC_VER) {
8289 /* for now only warn 9223 /* for now only warn
8290 * later we might need to enforce this */ 9224 * later we might need to enforce this */
8291 BNX2X_ERR("This driver needs bc_ver %X but found %X," 9225 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
8292 " please upgrade BC\n", BNX2X_BC_VER, val); 9226 "please upgrade BC\n", BNX2X_BC_VER, val);
8293 } 9227 }
8294 bp->link_params.feature_config_flags |= 9228 bp->link_params.feature_config_flags |=
8295 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ? 9229 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
@@ -8310,7 +9244,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8310 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]); 9244 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8311 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]); 9245 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8312 9246
8313 pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4); 9247 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9248 val, val2, val3, val4);
8314} 9249}
8315 9250
8316static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp, 9251static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8588,11 +9523,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8588 bp->port.advertising = (ADVERTISED_10baseT_Full | 9523 bp->port.advertising = (ADVERTISED_10baseT_Full |
8589 ADVERTISED_TP); 9524 ADVERTISED_TP);
8590 } else { 9525 } else {
8591 BNX2X_ERR("NVRAM config error. " 9526 BNX2X_ERROR("NVRAM config error. "
8592 "Invalid link_config 0x%x" 9527 "Invalid link_config 0x%x"
8593 " speed_cap_mask 0x%x\n", 9528 " speed_cap_mask 0x%x\n",
8594 bp->port.link_config, 9529 bp->port.link_config,
8595 bp->link_params.speed_cap_mask); 9530 bp->link_params.speed_cap_mask);
8596 return; 9531 return;
8597 } 9532 }
8598 break; 9533 break;
@@ -8604,11 +9539,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8604 bp->port.advertising = (ADVERTISED_10baseT_Half | 9539 bp->port.advertising = (ADVERTISED_10baseT_Half |
8605 ADVERTISED_TP); 9540 ADVERTISED_TP);
8606 } else { 9541 } else {
8607 BNX2X_ERR("NVRAM config error. " 9542 BNX2X_ERROR("NVRAM config error. "
8608 "Invalid link_config 0x%x" 9543 "Invalid link_config 0x%x"
8609 " speed_cap_mask 0x%x\n", 9544 " speed_cap_mask 0x%x\n",
8610 bp->port.link_config, 9545 bp->port.link_config,
8611 bp->link_params.speed_cap_mask); 9546 bp->link_params.speed_cap_mask);
8612 return; 9547 return;
8613 } 9548 }
8614 break; 9549 break;
@@ -8619,11 +9554,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8619 bp->port.advertising = (ADVERTISED_100baseT_Full | 9554 bp->port.advertising = (ADVERTISED_100baseT_Full |
8620 ADVERTISED_TP); 9555 ADVERTISED_TP);
8621 } else { 9556 } else {
8622 BNX2X_ERR("NVRAM config error. " 9557 BNX2X_ERROR("NVRAM config error. "
8623 "Invalid link_config 0x%x" 9558 "Invalid link_config 0x%x"
8624 " speed_cap_mask 0x%x\n", 9559 " speed_cap_mask 0x%x\n",
8625 bp->port.link_config, 9560 bp->port.link_config,
8626 bp->link_params.speed_cap_mask); 9561 bp->link_params.speed_cap_mask);
8627 return; 9562 return;
8628 } 9563 }
8629 break; 9564 break;
@@ -8635,11 +9570,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8635 bp->port.advertising = (ADVERTISED_100baseT_Half | 9570 bp->port.advertising = (ADVERTISED_100baseT_Half |
8636 ADVERTISED_TP); 9571 ADVERTISED_TP);
8637 } else { 9572 } else {
8638 BNX2X_ERR("NVRAM config error. " 9573 BNX2X_ERROR("NVRAM config error. "
8639 "Invalid link_config 0x%x" 9574 "Invalid link_config 0x%x"
8640 " speed_cap_mask 0x%x\n", 9575 " speed_cap_mask 0x%x\n",
8641 bp->port.link_config, 9576 bp->port.link_config,
8642 bp->link_params.speed_cap_mask); 9577 bp->link_params.speed_cap_mask);
8643 return; 9578 return;
8644 } 9579 }
8645 break; 9580 break;
@@ -8650,11 +9585,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8650 bp->port.advertising = (ADVERTISED_1000baseT_Full | 9585 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8651 ADVERTISED_TP); 9586 ADVERTISED_TP);
8652 } else { 9587 } else {
8653 BNX2X_ERR("NVRAM config error. " 9588 BNX2X_ERROR("NVRAM config error. "
8654 "Invalid link_config 0x%x" 9589 "Invalid link_config 0x%x"
8655 " speed_cap_mask 0x%x\n", 9590 " speed_cap_mask 0x%x\n",
8656 bp->port.link_config, 9591 bp->port.link_config,
8657 bp->link_params.speed_cap_mask); 9592 bp->link_params.speed_cap_mask);
8658 return; 9593 return;
8659 } 9594 }
8660 break; 9595 break;
@@ -8665,11 +9600,11 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8665 bp->port.advertising = (ADVERTISED_2500baseX_Full | 9600 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8666 ADVERTISED_TP); 9601 ADVERTISED_TP);
8667 } else { 9602 } else {
8668 BNX2X_ERR("NVRAM config error. " 9603 BNX2X_ERROR("NVRAM config error. "
8669 "Invalid link_config 0x%x" 9604 "Invalid link_config 0x%x"
8670 " speed_cap_mask 0x%x\n", 9605 " speed_cap_mask 0x%x\n",
8671 bp->port.link_config, 9606 bp->port.link_config,
8672 bp->link_params.speed_cap_mask); 9607 bp->link_params.speed_cap_mask);
8673 return; 9608 return;
8674 } 9609 }
8675 break; 9610 break;
@@ -8682,19 +9617,19 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8682 bp->port.advertising = (ADVERTISED_10000baseT_Full | 9617 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8683 ADVERTISED_FIBRE); 9618 ADVERTISED_FIBRE);
8684 } else { 9619 } else {
8685 BNX2X_ERR("NVRAM config error. " 9620 BNX2X_ERROR("NVRAM config error. "
8686 "Invalid link_config 0x%x" 9621 "Invalid link_config 0x%x"
8687 " speed_cap_mask 0x%x\n", 9622 " speed_cap_mask 0x%x\n",
8688 bp->port.link_config, 9623 bp->port.link_config,
8689 bp->link_params.speed_cap_mask); 9624 bp->link_params.speed_cap_mask);
8690 return; 9625 return;
8691 } 9626 }
8692 break; 9627 break;
8693 9628
8694 default: 9629 default:
8695 BNX2X_ERR("NVRAM config error. " 9630 BNX2X_ERROR("NVRAM config error. "
8696 "BAD link speed link_config 0x%x\n", 9631 "BAD link speed link_config 0x%x\n",
8697 bp->port.link_config); 9632 bp->port.link_config);
8698 bp->link_params.req_line_speed = SPEED_AUTO_NEG; 9633 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8699 bp->port.advertising = bp->port.supported; 9634 bp->port.advertising = bp->port.supported;
8700 break; 9635 break;
@@ -8823,7 +9758,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8823 9758
8824 bp->e1hov = 0; 9759 bp->e1hov = 0;
8825 bp->e1hmf = 0; 9760 bp->e1hmf = 0;
8826 if (CHIP_IS_E1H(bp)) { 9761 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
8827 bp->mf_config = 9762 bp->mf_config =
8828 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config); 9763 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8829 9764
@@ -8844,14 +9779,14 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8844 "(0x%04x)\n", 9779 "(0x%04x)\n",
8845 func, bp->e1hov, bp->e1hov); 9780 func, bp->e1hov, bp->e1hov);
8846 } else { 9781 } else {
8847 BNX2X_ERR("!!! No valid E1HOV for func %d," 9782 BNX2X_ERROR("No valid E1HOV for func %d,"
8848 " aborting\n", func); 9783 " aborting\n", func);
8849 rc = -EPERM; 9784 rc = -EPERM;
8850 } 9785 }
8851 } else { 9786 } else {
8852 if (BP_E1HVN(bp)) { 9787 if (BP_E1HVN(bp)) {
8853 BNX2X_ERR("!!! VN %d in single function mode," 9788 BNX2X_ERROR("VN %d in single function mode,"
8854 " aborting\n", BP_E1HVN(bp)); 9789 " aborting\n", BP_E1HVN(bp));
8855 rc = -EPERM; 9790 rc = -EPERM;
8856 } 9791 }
8857 } 9792 }
@@ -8887,7 +9822,7 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8887 9822
8888 if (BP_NOMCP(bp)) { 9823 if (BP_NOMCP(bp)) {
8889 /* only supposed to happen on emulation/FPGA */ 9824 /* only supposed to happen on emulation/FPGA */
8890 BNX2X_ERR("warning random MAC workaround active\n"); 9825 BNX2X_ERROR("warning: random MAC workaround active\n");
8891 random_ether_addr(bp->dev->dev_addr); 9826 random_ether_addr(bp->dev->dev_addr);
8892 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 9827 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8893 } 9828 }
@@ -8895,6 +9830,70 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8895 return rc; 9830 return rc;
8896} 9831}
8897 9832
9833static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9834{
9835 int cnt, i, block_end, rodi;
9836 char vpd_data[BNX2X_VPD_LEN+1];
9837 char str_id_reg[VENDOR_ID_LEN+1];
9838 char str_id_cap[VENDOR_ID_LEN+1];
9839 u8 len;
9840
9841 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9842 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9843
9844 if (cnt < BNX2X_VPD_LEN)
9845 goto out_not_found;
9846
9847 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9848 PCI_VPD_LRDT_RO_DATA);
9849 if (i < 0)
9850 goto out_not_found;
9851
9852
9853 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9854 pci_vpd_lrdt_size(&vpd_data[i]);
9855
9856 i += PCI_VPD_LRDT_TAG_SIZE;
9857
9858 if (block_end > BNX2X_VPD_LEN)
9859 goto out_not_found;
9860
9861 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9862 PCI_VPD_RO_KEYWORD_MFR_ID);
9863 if (rodi < 0)
9864 goto out_not_found;
9865
9866 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9867
9868 if (len != VENDOR_ID_LEN)
9869 goto out_not_found;
9870
9871 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9872
9873 /* vendor specific info */
9874 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9875 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9876 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9877 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9878
9879 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9880 PCI_VPD_RO_KEYWORD_VENDOR0);
9881 if (rodi >= 0) {
9882 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9883
9884 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9885
9886 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9887 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9888 bp->fw_ver[len] = ' ';
9889 }
9890 }
9891 return;
9892 }
9893out_not_found:
9894 return;
9895}
9896
8898static int __devinit bnx2x_init_bp(struct bnx2x *bp) 9897static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8899{ 9898{
8900 int func = BP_FUNC(bp); 9899 int func = BP_FUNC(bp);
@@ -8912,29 +9911,34 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8912#endif 9911#endif
8913 9912
8914 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 9913 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8915 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 9914 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
8916 9915
8917 rc = bnx2x_get_hwinfo(bp); 9916 rc = bnx2x_get_hwinfo(bp);
8918 9917
9918 bnx2x_read_fwinfo(bp);
8919 /* need to reset chip if undi was active */ 9919 /* need to reset chip if undi was active */
8920 if (!BP_NOMCP(bp)) 9920 if (!BP_NOMCP(bp))
8921 bnx2x_undi_unload(bp); 9921 bnx2x_undi_unload(bp);
8922 9922
8923 if (CHIP_REV_IS_FPGA(bp)) 9923 if (CHIP_REV_IS_FPGA(bp))
8924 pr_err("FPGA detected\n"); 9924 dev_err(&bp->pdev->dev, "FPGA detected\n");
8925 9925
8926 if (BP_NOMCP(bp) && (func == 0)) 9926 if (BP_NOMCP(bp) && (func == 0))
8927 pr_err("MCP disabled, must load devices in order!\n"); 9927 dev_err(&bp->pdev->dev, "MCP disabled, "
9928 "must load devices in order!\n");
8928 9929
8929 /* Set multi queue mode */ 9930 /* Set multi queue mode */
8930 if ((multi_mode != ETH_RSS_MODE_DISABLED) && 9931 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8931 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) { 9932 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8932 pr_err("Multi disabled since int_mode requested is not MSI-X\n"); 9933 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9934 "requested is not MSI-X\n");
8933 multi_mode = ETH_RSS_MODE_DISABLED; 9935 multi_mode = ETH_RSS_MODE_DISABLED;
8934 } 9936 }
8935 bp->multi_mode = multi_mode; 9937 bp->multi_mode = multi_mode;
8936 9938
8937 9939
9940 bp->dev->features |= NETIF_F_GRO;
9941
8938 /* Set TPA flags */ 9942 /* Set TPA flags */
8939 if (disable_tpa) { 9943 if (disable_tpa) {
8940 bp->flags &= ~TPA_ENABLE_FLAG; 9944 bp->flags &= ~TPA_ENABLE_FLAG;
@@ -9304,11 +10308,13 @@ static void bnx2x_get_drvinfo(struct net_device *dev,
9304 bnx2x_release_phy_lock(bp); 10308 bnx2x_release_phy_lock(bp);
9305 } 10309 }
9306 10310
9307 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s", 10311 strncpy(info->fw_version, bp->fw_ver, 32);
10312 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10313 "bc %d.%d.%d%s%s",
9308 (bp->common.bc_ver & 0xff0000) >> 16, 10314 (bp->common.bc_ver & 0xff0000) >> 16,
9309 (bp->common.bc_ver & 0xff00) >> 8, 10315 (bp->common.bc_ver & 0xff00) >> 8,
9310 (bp->common.bc_ver & 0xff), 10316 (bp->common.bc_ver & 0xff),
9311 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver); 10317 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
9312 strcpy(info->bus_info, pci_name(bp->pdev)); 10318 strcpy(info->bus_info, pci_name(bp->pdev));
9313 info->n_stats = BNX2X_NUM_STATS; 10319 info->n_stats = BNX2X_NUM_STATS;
9314 info->testinfo_len = BNX2X_NUM_TESTS; 10320 info->testinfo_len = BNX2X_NUM_TESTS;
@@ -9842,19 +10848,18 @@ static int bnx2x_get_coalesce(struct net_device *dev,
9842 return 0; 10848 return 0;
9843} 10849}
9844 10850
9845#define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9846static int bnx2x_set_coalesce(struct net_device *dev, 10851static int bnx2x_set_coalesce(struct net_device *dev,
9847 struct ethtool_coalesce *coal) 10852 struct ethtool_coalesce *coal)
9848{ 10853{
9849 struct bnx2x *bp = netdev_priv(dev); 10854 struct bnx2x *bp = netdev_priv(dev);
9850 10855
9851 bp->rx_ticks = (u16) coal->rx_coalesce_usecs; 10856 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
9852 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT) 10857 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9853 bp->rx_ticks = BNX2X_MAX_COALES_TOUT; 10858 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9854 10859
9855 bp->tx_ticks = (u16) coal->tx_coalesce_usecs; 10860 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
9856 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT) 10861 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9857 bp->tx_ticks = BNX2X_MAX_COALES_TOUT; 10862 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9858 10863
9859 if (netif_running(dev)) 10864 if (netif_running(dev))
9860 bnx2x_update_coalesce(bp); 10865 bnx2x_update_coalesce(bp);
@@ -9885,6 +10890,11 @@ static int bnx2x_set_ringparam(struct net_device *dev,
9885 struct bnx2x *bp = netdev_priv(dev); 10890 struct bnx2x *bp = netdev_priv(dev);
9886 int rc = 0; 10891 int rc = 0;
9887 10892
10893 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10894 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10895 return -EAGAIN;
10896 }
10897
9888 if ((ering->rx_pending > MAX_RX_AVAIL) || 10898 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9889 (ering->tx_pending > MAX_TX_AVAIL) || 10899 (ering->tx_pending > MAX_TX_AVAIL) ||
9890 (ering->tx_pending <= MAX_SKB_FRAGS + 4)) 10900 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
@@ -9970,6 +10980,11 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
9970 int changed = 0; 10980 int changed = 0;
9971 int rc = 0; 10981 int rc = 0;
9972 10982
10983 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10984 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10985 return -EAGAIN;
10986 }
10987
9973 /* TPA requires Rx CSUM offloading */ 10988 /* TPA requires Rx CSUM offloading */
9974 if ((data & ETH_FLAG_LRO) && bp->rx_csum) { 10989 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9975 if (!disable_tpa) { 10990 if (!disable_tpa) {
@@ -10006,6 +11021,11 @@ static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
10006 struct bnx2x *bp = netdev_priv(dev); 11021 struct bnx2x *bp = netdev_priv(dev);
10007 int rc = 0; 11022 int rc = 0;
10008 11023
11024 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11025 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11026 return -EAGAIN;
11027 }
11028
10009 bp->rx_csum = data; 11029 bp->rx_csum = data;
10010 11030
10011 /* Disable TPA, when Rx CSUM is disabled. Otherwise all 11031 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
@@ -10050,9 +11070,9 @@ static int bnx2x_test_registers(struct bnx2x *bp)
10050 u32 wr_val = 0; 11070 u32 wr_val = 0;
10051 int port = BP_PORT(bp); 11071 int port = BP_PORT(bp);
10052 static const struct { 11072 static const struct {
10053 u32 offset0; 11073 u32 offset0;
10054 u32 offset1; 11074 u32 offset1;
10055 u32 mask; 11075 u32 mask;
10056 } reg_tbl[] = { 11076 } reg_tbl[] = {
10057/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff }, 11077/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
10058 { DORQ_REG_DB_ADDR0, 4, 0xffffffff }, 11078 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
@@ -10119,15 +11139,19 @@ static int bnx2x_test_registers(struct bnx2x *bp)
10119 11139
10120 save_val = REG_RD(bp, offset); 11140 save_val = REG_RD(bp, offset);
10121 11141
10122 REG_WR(bp, offset, wr_val); 11142 REG_WR(bp, offset, (wr_val & mask));
10123 val = REG_RD(bp, offset); 11143 val = REG_RD(bp, offset);
10124 11144
10125 /* Restore the original register's value */ 11145 /* Restore the original register's value */
10126 REG_WR(bp, offset, save_val); 11146 REG_WR(bp, offset, save_val);
10127 11147
10128 /* verify that value is as expected value */ 11148 /* verify value is as expected */
10129 if ((val & mask) != (wr_val & mask)) 11149 if ((val & mask) != (wr_val & mask)) {
11150 DP(NETIF_MSG_PROBE,
11151 "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11152 offset, val, wr_val, mask);
10130 goto test_reg_exit; 11153 goto test_reg_exit;
11154 }
10131 } 11155 }
10132 } 11156 }
10133 11157
@@ -10267,8 +11291,8 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
10267 11291
10268 bd_prod = TX_BD(fp_tx->tx_bd_prod); 11292 bd_prod = TX_BD(fp_tx->tx_bd_prod);
10269 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd; 11293 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
10270 mapping = pci_map_single(bp->pdev, skb->data, 11294 mapping = dma_map_single(&bp->pdev->dev, skb->data,
10271 skb_headlen(skb), PCI_DMA_TODEVICE); 11295 skb_headlen(skb), DMA_TO_DEVICE);
10272 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 11296 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10273 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 11297 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10274 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */ 11298 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
@@ -10344,6 +11368,9 @@ static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
10344{ 11368{
10345 int rc = 0, res; 11369 int rc = 0, res;
10346 11370
11371 if (BP_NOMCP(bp))
11372 return rc;
11373
10347 if (!netif_running(bp->dev)) 11374 if (!netif_running(bp->dev))
10348 return BNX2X_LOOPBACK_FAILED; 11375 return BNX2X_LOOPBACK_FAILED;
10349 11376
@@ -10391,6 +11418,9 @@ static int bnx2x_test_nvram(struct bnx2x *bp)
10391 int i, rc; 11418 int i, rc;
10392 u32 magic, crc; 11419 u32 magic, crc;
10393 11420
11421 if (BP_NOMCP(bp))
11422 return 0;
11423
10394 rc = bnx2x_nvram_read(bp, 0, data, 4); 11424 rc = bnx2x_nvram_read(bp, 0, data, 4);
10395 if (rc) { 11425 if (rc) {
10396 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc); 11426 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
@@ -10468,6 +11498,12 @@ static void bnx2x_self_test(struct net_device *dev,
10468{ 11498{
10469 struct bnx2x *bp = netdev_priv(dev); 11499 struct bnx2x *bp = netdev_priv(dev);
10470 11500
11501 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11502 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11503 etest->flags |= ETH_TEST_FL_FAILED;
11504 return;
11505 }
11506
10471 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS); 11507 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10472 11508
10473 if (!netif_running(dev)) 11509 if (!netif_running(dev))
@@ -10556,7 +11592,11 @@ static const struct {
10556 11592
10557/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" }, 11593/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10558 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi), 11594 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10559 8, "[%d]: tx_packets" } 11595 8, "[%d]: tx_ucast_packets" },
11596 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11597 8, "[%d]: tx_mcast_packets" },
11598 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11599 8, "[%d]: tx_bcast_packets" }
10560}; 11600};
10561 11601
10562static const struct { 11602static const struct {
@@ -10618,16 +11658,20 @@ static const struct {
10618 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 11658 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10619 8, STATS_FLAGS_PORT, "tx_error_bytes" }, 11659 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10620 { STATS_OFFSET32(total_unicast_packets_transmitted_hi), 11660 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10621 8, STATS_FLAGS_BOTH, "tx_packets" }, 11661 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11662 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11663 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11664 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11665 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
10622 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi), 11666 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10623 8, STATS_FLAGS_PORT, "tx_mac_errors" }, 11667 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10624 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi), 11668 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10625 8, STATS_FLAGS_PORT, "tx_carrier_errors" }, 11669 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10626 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi), 11670/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10627 8, STATS_FLAGS_PORT, "tx_single_collisions" }, 11671 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10628 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi), 11672 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10629 8, STATS_FLAGS_PORT, "tx_multi_collisions" }, 11673 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10630/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi), 11674 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10631 8, STATS_FLAGS_PORT, "tx_deferred" }, 11675 8, STATS_FLAGS_PORT, "tx_deferred" },
10632 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi), 11676 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10633 8, STATS_FLAGS_PORT, "tx_excess_collisions" }, 11677 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
@@ -10643,11 +11687,11 @@ static const struct {
10643 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" }, 11687 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10644 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi), 11688 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10645 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" }, 11689 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10646 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi), 11690/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10647 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" }, 11691 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10648 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi), 11692 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10649 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" }, 11693 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10650/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi), 11694 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
10651 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" }, 11695 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10652 { STATS_OFFSET32(pause_frames_sent_hi), 11696 { STATS_OFFSET32(pause_frames_sent_hi),
10653 8, STATS_FLAGS_PORT, "tx_pause_frames" } 11697 8, STATS_FLAGS_PORT, "tx_pause_frames" }
@@ -10664,7 +11708,7 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10664 struct bnx2x *bp = netdev_priv(dev); 11708 struct bnx2x *bp = netdev_priv(dev);
10665 int i, num_stats; 11709 int i, num_stats;
10666 11710
10667 switch(stringset) { 11711 switch (stringset) {
10668 case ETH_SS_STATS: 11712 case ETH_SS_STATS:
10669 if (is_multi(bp)) { 11713 if (is_multi(bp)) {
10670 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; 11714 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
@@ -10893,6 +11937,14 @@ static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10893 break; 11937 break;
10894 11938
10895 case PCI_D3hot: 11939 case PCI_D3hot:
11940 /* If there are other clients above, don't
11941 shut down the power */
11942 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11943 return 0;
11944 /* Don't shut down the power for emulation and FPGA */
11945 if (CHIP_REV_IS_SLOW(bp))
11946 return 0;
11947
10896 pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 11948 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10897 pmcsr |= 3; 11949 pmcsr |= 3;
10898 11950
@@ -11182,6 +12234,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11182 int i; 12234 int i;
11183 u8 hlen = 0; 12235 u8 hlen = 0;
11184 __le16 pkt_size = 0; 12236 __le16 pkt_size = 0;
12237 struct ethhdr *eth;
12238 u8 mac_type = UNICAST_ADDRESS;
11185 12239
11186#ifdef BNX2X_STOP_ON_ERROR 12240#ifdef BNX2X_STOP_ON_ERROR
11187 if (unlikely(bp->panic)) 12241 if (unlikely(bp->panic))
@@ -11205,6 +12259,16 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11205 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr, 12259 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
11206 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type); 12260 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
11207 12261
12262 eth = (struct ethhdr *)skb->data;
12263
12264 /* Set flag according to packet type (UNICAST_ADDRESS is the default) */
12265 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12266 if (is_broadcast_ether_addr(eth->h_dest))
12267 mac_type = BROADCAST_ADDRESS;
12268 else
12269 mac_type = MULTICAST_ADDRESS;
12270 }
12271
11208#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3) 12272#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
11209 /* First, check if we need to linearize the skb (due to FW 12273 /* First, check if we need to linearize the skb (due to FW
11210 restrictions). No need to check fragmentation if page size > 8K 12274 restrictions). No need to check fragmentation if page size > 8K
@@ -11238,8 +12302,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11238 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd; 12302 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
11239 12303
11240 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; 12304 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11241 tx_start_bd->general_data = (UNICAST_ADDRESS << 12305 tx_start_bd->general_data = (mac_type <<
11242 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT); 12306 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
11243 /* header nbd */ 12307 /* header nbd */
11244 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); 12308 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
11245 12309
@@ -11314,8 +12378,8 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11314 } 12378 }
11315 } 12379 }
11316 12380
11317 mapping = pci_map_single(bp->pdev, skb->data, 12381 mapping = dma_map_single(&bp->pdev->dev, skb->data,
11318 skb_headlen(skb), PCI_DMA_TODEVICE); 12382 skb_headlen(skb), DMA_TO_DEVICE);
11319 12383
11320 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 12384 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11321 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 12385 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11372,8 +12436,9 @@ static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
11372 if (total_pkt_bd == NULL) 12436 if (total_pkt_bd == NULL)
11373 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd; 12437 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11374 12438
11375 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset, 12439 mapping = dma_map_page(&bp->pdev->dev, frag->page,
11376 frag->size, PCI_DMA_TODEVICE); 12440 frag->page_offset,
12441 frag->size, DMA_TO_DEVICE);
11377 12442
11378 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping)); 12443 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11379 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping)); 12444 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
@@ -11452,6 +12517,40 @@ static int bnx2x_open(struct net_device *dev)
11452 12517
11453 bnx2x_set_power_state(bp, PCI_D0); 12518 bnx2x_set_power_state(bp, PCI_D0);
11454 12519
12520 if (!bnx2x_reset_is_done(bp)) {
12521 do {
12522 /* Reset MCP mailbox sequence if there is an ongoing
12523 * recovery
12524 */
12525 bp->fw_seq = 0;
12526
12527 /* If it's the first function to load and "reset done"
12528 * is still not cleared, a recovery is likely needed. We
12529 * don't check the attention state here because it may have
12530 * already been cleared by a "common" reset, but we
12531 * shall proceed with "process kill" anyway.
12532 */
12533 if ((bnx2x_get_load_cnt(bp) == 0) &&
12534 bnx2x_trylock_hw_lock(bp,
12535 HW_LOCK_RESOURCE_RESERVED_08) &&
12536 (!bnx2x_leader_reset(bp))) {
12537 DP(NETIF_MSG_HW, "Recovered in open\n");
12538 break;
12539 }
12540
12541 bnx2x_set_power_state(bp, PCI_D3hot);
12542
12543 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
12544 " completed yet. Try again later. If u still see this"
12545 " message after a few retries then power cycle is"
12546 " required.\n", bp->dev->name);
12547
12548 return -EAGAIN;
12549 } while (0);
12550 }
12551
12552 bp->recovery_state = BNX2X_RECOVERY_DONE;
12553
11455 return bnx2x_nic_load(bp, LOAD_OPEN); 12554 return bnx2x_nic_load(bp, LOAD_OPEN);
11456} 12555}
11457 12556
@@ -11462,9 +12561,7 @@ static int bnx2x_close(struct net_device *dev)
11462 12561
11463 /* Unload the driver, release IRQs */ 12562 /* Unload the driver, release IRQs */
11464 bnx2x_nic_unload(bp, UNLOAD_CLOSE); 12563 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11465 if (atomic_read(&bp->pdev->enable_cnt) == 1) 12564 bnx2x_set_power_state(bp, PCI_D3hot);
11466 if (!CHIP_REV_IS_SLOW(bp))
11467 bnx2x_set_power_state(bp, PCI_D3hot);
11468 12565
11469 return 0; 12566 return 0;
11470} 12567}
@@ -11494,21 +12591,21 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11494 else { /* some multicasts */ 12591 else { /* some multicasts */
11495 if (CHIP_IS_E1(bp)) { 12592 if (CHIP_IS_E1(bp)) {
11496 int i, old, offset; 12593 int i, old, offset;
11497 struct dev_mc_list *mclist; 12594 struct netdev_hw_addr *ha;
11498 struct mac_configuration_cmd *config = 12595 struct mac_configuration_cmd *config =
11499 bnx2x_sp(bp, mcast_config); 12596 bnx2x_sp(bp, mcast_config);
11500 12597
11501 i = 0; 12598 i = 0;
11502 netdev_for_each_mc_addr(mclist, dev) { 12599 netdev_for_each_mc_addr(ha, dev) {
11503 config->config_table[i]. 12600 config->config_table[i].
11504 cam_entry.msb_mac_addr = 12601 cam_entry.msb_mac_addr =
11505 swab16(*(u16 *)&mclist->dmi_addr[0]); 12602 swab16(*(u16 *)&ha->addr[0]);
11506 config->config_table[i]. 12603 config->config_table[i].
11507 cam_entry.middle_mac_addr = 12604 cam_entry.middle_mac_addr =
11508 swab16(*(u16 *)&mclist->dmi_addr[2]); 12605 swab16(*(u16 *)&ha->addr[2]);
11509 config->config_table[i]. 12606 config->config_table[i].
11510 cam_entry.lsb_mac_addr = 12607 cam_entry.lsb_mac_addr =
11511 swab16(*(u16 *)&mclist->dmi_addr[4]); 12608 swab16(*(u16 *)&ha->addr[4]);
11512 config->config_table[i].cam_entry.flags = 12609 config->config_table[i].cam_entry.flags =
11513 cpu_to_le16(port); 12610 cpu_to_le16(port);
11514 config->config_table[i]. 12611 config->config_table[i].
@@ -11562,18 +12659,18 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11562 0); 12659 0);
11563 } else { /* E1H */ 12660 } else { /* E1H */
11564 /* Accept one or more multicasts */ 12661 /* Accept one or more multicasts */
11565 struct dev_mc_list *mclist; 12662 struct netdev_hw_addr *ha;
11566 u32 mc_filter[MC_HASH_SIZE]; 12663 u32 mc_filter[MC_HASH_SIZE];
11567 u32 crc, bit, regidx; 12664 u32 crc, bit, regidx;
11568 int i; 12665 int i;
11569 12666
11570 memset(mc_filter, 0, 4 * MC_HASH_SIZE); 12667 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11571 12668
11572 netdev_for_each_mc_addr(mclist, dev) { 12669 netdev_for_each_mc_addr(ha, dev) {
11573 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n", 12670 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11574 mclist->dmi_addr); 12671 ha->addr);
11575 12672
11576 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN); 12673 crc = crc32c_le(0, ha->addr, ETH_ALEN);
11577 bit = (crc >> 24) & 0xff; 12674 bit = (crc >> 24) & 0xff;
11578 regidx = bit >> 5; 12675 regidx = bit >> 5;
11579 bit &= 0x1f; 12676 bit &= 0x1f;
@@ -11690,6 +12787,11 @@ static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11690 struct bnx2x *bp = netdev_priv(dev); 12787 struct bnx2x *bp = netdev_priv(dev);
11691 int rc = 0; 12788 int rc = 0;
11692 12789
12790 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12791 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12792 return -EAGAIN;
12793 }
12794
11693 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) || 12795 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11694 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) 12796 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11695 return -EINVAL; 12797 return -EINVAL;
@@ -11717,7 +12819,7 @@ static void bnx2x_tx_timeout(struct net_device *dev)
11717 bnx2x_panic(); 12819 bnx2x_panic();
11718#endif 12820#endif
11719 /* This allows the netif to be shutdown gracefully before resetting */ 12821 /* This allows the netif to be shutdown gracefully before resetting */
11720 schedule_work(&bp->reset_task); 12822 schedule_delayed_work(&bp->reset_task, 0);
11721} 12823}
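
The hunk above converts bp->reset_task from a plain work item to delayed work; scheduling with a delay of 0 keeps the old run-immediately behaviour while letting other call sites defer the reset. A hedged sketch of the pattern (foo and foo_reset_task are illustrative names):

	#include <linux/workqueue.h>

	struct foo {
		struct delayed_work reset_task;
		int resets;		/* bookkeeping only, for the sketch */
	};

	static void foo_reset_task(struct work_struct *work)
	{
		struct foo *f = container_of(work, struct foo, reset_task.work);

		f->resets++;	/* ... tear the device down and bring it back up ... */
	}

	/* Setup:     INIT_DELAYED_WORK(&f->reset_task, foo_reset_task);
	 * Immediate: schedule_delayed_work(&f->reset_task, 0);  (as in the hunk)
	 * Deferred:  schedule_delayed_work(&f->reset_task, HZ); (in ~1 second)
	 * Teardown:  cancel_delayed_work_sync(&f->reset_task);  (see remove_one) */
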
11722 12824
11723#ifdef BCM_VLAN 12825#ifdef BCM_VLAN
@@ -11789,18 +12891,21 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11789 12891
11790 rc = pci_enable_device(pdev); 12892 rc = pci_enable_device(pdev);
11791 if (rc) { 12893 if (rc) {
11792 pr_err("Cannot enable PCI device, aborting\n"); 12894 dev_err(&bp->pdev->dev,
12895 "Cannot enable PCI device, aborting\n");
11793 goto err_out; 12896 goto err_out;
11794 } 12897 }
11795 12898
11796 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 12899 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11797 pr_err("Cannot find PCI device base address, aborting\n"); 12900 dev_err(&bp->pdev->dev,
12901 "Cannot find PCI device base address, aborting\n");
11798 rc = -ENODEV; 12902 rc = -ENODEV;
11799 goto err_out_disable; 12903 goto err_out_disable;
11800 } 12904 }
11801 12905
11802 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) { 12906 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11803 pr_err("Cannot find second PCI device base address, aborting\n"); 12907 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12908 " base address, aborting\n");
11804 rc = -ENODEV; 12909 rc = -ENODEV;
11805 goto err_out_disable; 12910 goto err_out_disable;
11806 } 12911 }
@@ -11808,7 +12913,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11808 if (atomic_read(&pdev->enable_cnt) == 1) { 12913 if (atomic_read(&pdev->enable_cnt) == 1) {
11809 rc = pci_request_regions(pdev, DRV_MODULE_NAME); 12914 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11810 if (rc) { 12915 if (rc) {
11811 pr_err("Cannot obtain PCI resources, aborting\n"); 12916 dev_err(&bp->pdev->dev,
12917 "Cannot obtain PCI resources, aborting\n");
11812 goto err_out_disable; 12918 goto err_out_disable;
11813 } 12919 }
11814 12920
@@ -11818,28 +12924,32 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11818 12924
11819 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 12925 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11820 if (bp->pm_cap == 0) { 12926 if (bp->pm_cap == 0) {
11821 pr_err("Cannot find power management capability, aborting\n"); 12927 dev_err(&bp->pdev->dev,
12928 "Cannot find power management capability, aborting\n");
11822 rc = -EIO; 12929 rc = -EIO;
11823 goto err_out_release; 12930 goto err_out_release;
11824 } 12931 }
11825 12932
11826 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP); 12933 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11827 if (bp->pcie_cap == 0) { 12934 if (bp->pcie_cap == 0) {
11828 pr_err("Cannot find PCI Express capability, aborting\n"); 12935 dev_err(&bp->pdev->dev,
12936 "Cannot find PCI Express capability, aborting\n");
11829 rc = -EIO; 12937 rc = -EIO;
11830 goto err_out_release; 12938 goto err_out_release;
11831 } 12939 }
11832 12940
11833 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { 12941 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
11834 bp->flags |= USING_DAC_FLAG; 12942 bp->flags |= USING_DAC_FLAG;
11835 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) { 12943 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
11836 pr_err("pci_set_consistent_dma_mask failed, aborting\n"); 12944 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12945 " failed, aborting\n");
11837 rc = -EIO; 12946 rc = -EIO;
11838 goto err_out_release; 12947 goto err_out_release;
11839 } 12948 }
11840 12949
11841 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) { 12950 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
11842 pr_err("System does not support DMA, aborting\n"); 12951 dev_err(&bp->pdev->dev,
12952 "System does not support DMA, aborting\n");
11843 rc = -EIO; 12953 rc = -EIO;
11844 goto err_out_release; 12954 goto err_out_release;
11845 } 12955 }
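
The probe path now uses the generic DMA API instead of the pci_* wrappers. The 64-bit-then-32-bit fallback it implements is the standard idiom; condensed from the hunk above, with the error messages trimmed:

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		/* 64-bit streaming DMA works; the coherent mask must match */
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0)
			return -EIO;
	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		return -EIO;	/* not even 32-bit DMA is possible: give up */
	}
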
@@ -11852,7 +12962,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11852 12962
11853 bp->regview = pci_ioremap_bar(pdev, 0); 12963 bp->regview = pci_ioremap_bar(pdev, 0);
11854 if (!bp->regview) { 12964 if (!bp->regview) {
11855 pr_err("Cannot map register space, aborting\n"); 12965 dev_err(&bp->pdev->dev,
12966 "Cannot map register space, aborting\n");
11856 rc = -ENOMEM; 12967 rc = -ENOMEM;
11857 goto err_out_release; 12968 goto err_out_release;
11858 } 12969 }
@@ -11861,7 +12972,8 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11861 min_t(u64, BNX2X_DB_SIZE, 12972 min_t(u64, BNX2X_DB_SIZE,
11862 pci_resource_len(pdev, 2))); 12973 pci_resource_len(pdev, 2)));
11863 if (!bp->doorbells) { 12974 if (!bp->doorbells) {
11864 pr_err("Cannot map doorbell space, aborting\n"); 12975 dev_err(&bp->pdev->dev,
12976 "Cannot map doorbell space, aborting\n");
11865 rc = -ENOMEM; 12977 rc = -ENOMEM;
11866 goto err_out_unmap; 12978 goto err_out_unmap;
11867 } 12979 }
@@ -11876,6 +12988,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11876 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0); 12988 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11877 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0); 12989 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
11878 12990
12991 /* Reset the load counter */
12992 bnx2x_clear_load_cnt(bp);
12993
11879 dev->watchdog_timeo = TX_TIMEOUT; 12994 dev->watchdog_timeo = TX_TIMEOUT;
11880 12995
11881 dev->netdev_ops = &bnx2x_netdev_ops; 12996 dev->netdev_ops = &bnx2x_netdev_ops;
@@ -11963,7 +13078,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11963 offset = be32_to_cpu(sections[i].offset); 13078 offset = be32_to_cpu(sections[i].offset);
11964 len = be32_to_cpu(sections[i].len); 13079 len = be32_to_cpu(sections[i].len);
11965 if (offset + len > firmware->size) { 13080 if (offset + len > firmware->size) {
11966 pr_err("Section %d length is out of bounds\n", i); 13081 dev_err(&bp->pdev->dev,
13082 "Section %d length is out of bounds\n", i);
11967 return -EINVAL; 13083 return -EINVAL;
11968 } 13084 }
11969 } 13085 }
@@ -11975,7 +13091,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11975 13091
11976 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) { 13092 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11977 if (be16_to_cpu(ops_offsets[i]) > num_ops) { 13093 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
11978 pr_err("Section offset %d is out of bounds\n", i); 13094 dev_err(&bp->pdev->dev,
13095 "Section offset %d is out of bounds\n", i);
11979 return -EINVAL; 13096 return -EINVAL;
11980 } 13097 }
11981 } 13098 }
@@ -11987,7 +13104,8 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11987 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) || 13104 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11988 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) || 13105 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11989 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) { 13106 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11990 pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n", 13107 dev_err(&bp->pdev->dev,
13108 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
11991 fw_ver[0], fw_ver[1], fw_ver[2], 13109 fw_ver[0], fw_ver[1], fw_ver[2],
11992 fw_ver[3], BCM_5710_FW_MAJOR_VERSION, 13110 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11993 BCM_5710_FW_MINOR_VERSION, 13111 BCM_5710_FW_MINOR_VERSION,
@@ -12022,8 +13140,8 @@ static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
12022 for (i = 0, j = 0; i < n/8; i++, j += 2) { 13140 for (i = 0, j = 0; i < n/8; i++, j += 2) {
12023 tmp = be32_to_cpu(source[j]); 13141 tmp = be32_to_cpu(source[j]);
12024 target[i].op = (tmp >> 24) & 0xff; 13142 target[i].op = (tmp >> 24) & 0xff;
12025 target[i].offset = tmp & 0xffffff; 13143 target[i].offset = tmp & 0xffffff;
12026 target[i].raw_data = be32_to_cpu(source[j+1]); 13144 target[i].raw_data = be32_to_cpu(source[j + 1]);
12027 } 13145 }
12028} 13146}
12029 13147
@@ -12057,20 +13175,24 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
12057 13175
12058 if (CHIP_IS_E1(bp)) 13176 if (CHIP_IS_E1(bp))
12059 fw_file_name = FW_FILE_NAME_E1; 13177 fw_file_name = FW_FILE_NAME_E1;
12060 else 13178 else if (CHIP_IS_E1H(bp))
12061 fw_file_name = FW_FILE_NAME_E1H; 13179 fw_file_name = FW_FILE_NAME_E1H;
13180 else {
13181 dev_err(dev, "Unsupported chip revision\n");
13182 return -EINVAL;
13183 }
12062 13184
12063 pr_info("Loading %s\n", fw_file_name); 13185 dev_info(dev, "Loading %s\n", fw_file_name);
12064 13186
12065 rc = request_firmware(&bp->firmware, fw_file_name, dev); 13187 rc = request_firmware(&bp->firmware, fw_file_name, dev);
12066 if (rc) { 13188 if (rc) {
12067 pr_err("Can't load firmware file %s\n", fw_file_name); 13189 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
12068 goto request_firmware_exit; 13190 goto request_firmware_exit;
12069 } 13191 }
12070 13192
12071 rc = bnx2x_check_firmware(bp); 13193 rc = bnx2x_check_firmware(bp);
12072 if (rc) { 13194 if (rc) {
12073 pr_err("Corrupt firmware file %s\n", fw_file_name); 13195 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
12074 goto request_firmware_exit; 13196 goto request_firmware_exit;
12075 } 13197 }
12076 13198
@@ -12129,7 +13251,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12129 /* dev zeroed in init_etherdev */ 13251 /* dev zeroed in init_etherdev */
12130 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT); 13252 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
12131 if (!dev) { 13253 if (!dev) {
12132 pr_err("Cannot allocate net device\n"); 13254 dev_err(&pdev->dev, "Cannot allocate net device\n");
12133 return -ENOMEM; 13255 return -ENOMEM;
12134 } 13256 }
12135 13257
@@ -12151,7 +13273,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12151 /* Set init arrays */ 13273 /* Set init arrays */
12152 rc = bnx2x_init_firmware(bp, &pdev->dev); 13274 rc = bnx2x_init_firmware(bp, &pdev->dev);
12153 if (rc) { 13275 if (rc) {
12154 pr_err("Error loading firmware\n"); 13276 dev_err(&pdev->dev, "Error loading firmware\n");
12155 goto init_one_exit; 13277 goto init_one_exit;
12156 } 13278 }
12157 13279
@@ -12162,11 +13284,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
12162 } 13284 }
12163 13285
12164 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 13286 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
12165 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n", 13287 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
12166 board_info[ent->driver_data].name, 13288 " IRQ %d, ", board_info[ent->driver_data].name,
12167 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4), 13289 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12168 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz", 13290 pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
12169 dev->base_addr, bp->pdev->irq, dev->dev_addr); 13291 dev->base_addr, bp->pdev->irq);
13292 pr_cont("node addr %pM\n", dev->dev_addr);
12170 13293
12171 return 0; 13294 return 0;
12172 13295
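
The netdev_info call above now deliberately omits the trailing newline so that the MAC address, printed with pr_cont, lands on the same log line; pr_cont continues the previous message without emitting a new prefix. A tiny sketch (speed is a stand-in variable):

	pr_info("eth0: link up at %d Mbps", speed);	/* no trailing \n yet */
	pr_cont(", full duplex\n");			/* appended to the same line */
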
@@ -12194,13 +13317,16 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
12194 struct bnx2x *bp; 13317 struct bnx2x *bp;
12195 13318
12196 if (!dev) { 13319 if (!dev) {
12197 pr_err("BAD net device from bnx2x_init_one\n"); 13320 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12198 return; 13321 return;
12199 } 13322 }
12200 bp = netdev_priv(dev); 13323 bp = netdev_priv(dev);
12201 13324
12202 unregister_netdev(dev); 13325 unregister_netdev(dev);
12203 13326
13327 /* Make sure RESET task is not scheduled before continuing */
13328 cancel_delayed_work_sync(&bp->reset_task);
13329
12204 kfree(bp->init_ops_offsets); 13330 kfree(bp->init_ops_offsets);
12205 kfree(bp->init_ops); 13331 kfree(bp->init_ops);
12206 kfree(bp->init_data); 13332 kfree(bp->init_data);
@@ -12227,7 +13353,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
12227 struct bnx2x *bp; 13353 struct bnx2x *bp;
12228 13354
12229 if (!dev) { 13355 if (!dev) {
12230 pr_err("BAD net device from bnx2x_init_one\n"); 13356 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12231 return -ENODEV; 13357 return -ENODEV;
12232 } 13358 }
12233 bp = netdev_priv(dev); 13359 bp = netdev_priv(dev);
@@ -12259,11 +13385,16 @@ static int bnx2x_resume(struct pci_dev *pdev)
12259 int rc; 13385 int rc;
12260 13386
12261 if (!dev) { 13387 if (!dev) {
12262 pr_err("BAD net device from bnx2x_init_one\n"); 13388 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12263 return -ENODEV; 13389 return -ENODEV;
12264 } 13390 }
12265 bp = netdev_priv(dev); 13391 bp = netdev_priv(dev);
12266 13392
13393 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13394 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13395 return -EAGAIN;
13396 }
13397
12267 rtnl_lock(); 13398 rtnl_lock();
12268 13399
12269 pci_restore_state(pdev); 13400 pci_restore_state(pdev);
@@ -12430,6 +13561,11 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
12430 struct net_device *dev = pci_get_drvdata(pdev); 13561 struct net_device *dev = pci_get_drvdata(pdev);
12431 struct bnx2x *bp = netdev_priv(dev); 13562 struct bnx2x *bp = netdev_priv(dev);
12432 13563
13564 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13565 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13566 return;
13567 }
13568
12433 rtnl_lock(); 13569 rtnl_lock();
12434 13570
12435 bnx2x_eeh_recover(bp); 13571 bnx2x_eeh_recover(bp);
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index 944964e78c81..a1f3bf0cd630 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -766,6 +766,8 @@
766#define MCP_REG_MCPR_NVM_SW_ARB 0x86420 766#define MCP_REG_MCPR_NVM_SW_ARB 0x86420
767#define MCP_REG_MCPR_NVM_WRITE 0x86408 767#define MCP_REG_MCPR_NVM_WRITE 0x86408
768#define MCP_REG_MCPR_SCRATCH 0xa0000 768#define MCP_REG_MCPR_SCRATCH 0xa0000
769#define MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK (0x1<<1)
770#define MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK (0x1<<0)
769/* [R 32] read first 32 bit after inversion of function 0. mapped as 771/* [R 32] read first 32 bit after inversion of function 0. mapped as
770 follows: [0] NIG attention for function0; [1] NIG attention for 772 follows: [0] NIG attention for function0; [1] NIG attention for
771 function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp; 773 function1; [2] GPIO1 mcp; [3] GPIO2 mcp; [4] GPIO3 mcp; [5] GPIO4 mcp;
@@ -1249,6 +1251,8 @@
1249#define MISC_REG_E1HMF_MODE 0xa5f8 1251#define MISC_REG_E1HMF_MODE 0xa5f8
1250/* [RW 32] Debug only: spare RW register reset by core reset */ 1252/* [RW 32] Debug only: spare RW register reset by core reset */
1251#define MISC_REG_GENERIC_CR_0 0xa460 1253#define MISC_REG_GENERIC_CR_0 0xa460
1254/* [RW 32] Debug only: spare RW register reset by por reset */
1255#define MISC_REG_GENERIC_POR_1 0xa474
1252/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of 1256/* [RW 32] GPIO. [31-28] FLOAT port 0; [27-24] FLOAT port 0; When any of
1253 these bits is written as a '1'; the corresponding SPIO bit will turn off 1257 these bits is written as a '1'; the corresponding SPIO bit will turn off
1254 it's drivers and become an input. This is the reset state of all GPIO 1258 it's drivers and become an input. This is the reset state of all GPIO
@@ -1438,7 +1442,7 @@
1438 (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */ 1442 (~misc_registers_sw_timer_cfg_4.sw_timer_cfg_4[1] ) is set */
1439#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc 1443#define MISC_REG_SW_TIMER_RELOAD_VAL_4 0xa2fc
1440/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses 1444/* [RW 32] the value of the counter for sw timers1-8. there are 8 addresses
1441 in this register. address 0 - timer 1; address - timer 2�address 7 - 1445 in this register. address 0 - timer 1; address 1 - timer 2, ... address 7 -
1442 timer 8 */ 1446 timer 8 */
1443#define MISC_REG_SW_TIMER_VAL 0xa5c0 1447#define MISC_REG_SW_TIMER_VAL 0xa5c0
1444/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are 1448/* [RW 1] Set by the MCP to remember if one or more of the drivers is/are
@@ -2407,10 +2411,16 @@
2407/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means 2411/* [R 8] debug only: A bit mask for all PSWHST arbiter clients. '1' means
2408 this client is waiting for the arbiter. */ 2412 this client is waiting for the arbiter. */
2409#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008 2413#define PXP_REG_HST_CLIENTS_WAITING_TO_ARB 0x103008
2414/* [RW 1] When 1; doorbells are discarded and not passed to doorbell queue
2415 block. Should be used for closing the gates. */
2416#define PXP_REG_HST_DISCARD_DOORBELLS 0x1030a4
2410/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit 2417/* [R 1] debug only: '1' means this PSWHST is discarding doorbells. This bit
2411 should update according to 'hst_discard_doorbells' register when the state 2418 should update according to 'hst_discard_doorbells' register when the state
2412 machine is idle */ 2419 machine is idle */
2413#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0 2420#define PXP_REG_HST_DISCARD_DOORBELLS_STATUS 0x1030a0
2421/* [RW 1] When 1; new internal writes arriving to the block are discarded.
2422 Should be used for closing the gates. */
2423#define PXP_REG_HST_DISCARD_INTERNAL_WRITES 0x1030a8
2414/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1' 2424/* [R 6] debug only: A bit mask for all PSWHST internal write clients. '1'
2415 means this PSWHST is discarding inputs from this client. Each bit should 2425 means this PSWHST is discarding inputs from this client. Each bit should
2416 update according to 'hst_discard_internal_writes' register when the state 2426 update according to 'hst_discard_internal_writes' register when the state
@@ -4422,11 +4432,21 @@
4422#define MISC_REGISTERS_GPIO_PORT_SHIFT 4 4432#define MISC_REGISTERS_GPIO_PORT_SHIFT 4
4423#define MISC_REGISTERS_GPIO_SET_POS 8 4433#define MISC_REGISTERS_GPIO_SET_POS 8
4424#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588 4434#define MISC_REGISTERS_RESET_REG_1_CLEAR 0x588
4435#define MISC_REGISTERS_RESET_REG_1_RST_HC (0x1<<29)
4425#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7) 4436#define MISC_REGISTERS_RESET_REG_1_RST_NIG (0x1<<7)
4437#define MISC_REGISTERS_RESET_REG_1_RST_PXP (0x1<<26)
4438#define MISC_REGISTERS_RESET_REG_1_RST_PXPV (0x1<<27)
4426#define MISC_REGISTERS_RESET_REG_1_SET 0x584 4439#define MISC_REGISTERS_RESET_REG_1_SET 0x584
4427#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598 4440#define MISC_REGISTERS_RESET_REG_2_CLEAR 0x598
4428#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0) 4441#define MISC_REGISTERS_RESET_REG_2_RST_BMAC0 (0x1<<0)
4429#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14) 4442#define MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE (0x1<<14)
4443#define MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE (0x1<<15)
4444#define MISC_REGISTERS_RESET_REG_2_RST_GRC (0x1<<4)
4445#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B (0x1<<6)
4446#define MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE (0x1<<5)
4447#define MISC_REGISTERS_RESET_REG_2_RST_MDIO (0x1<<13)
4448#define MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE (0x1<<11)
4449#define MISC_REGISTERS_RESET_REG_2_RST_RBCN (0x1<<9)
4430#define MISC_REGISTERS_RESET_REG_2_SET 0x594 4450#define MISC_REGISTERS_RESET_REG_2_SET 0x594
4431#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8 4451#define MISC_REGISTERS_RESET_REG_3_CLEAR 0x5a8
4432#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1) 4452#define MISC_REGISTERS_RESET_REG_3_MISC_NIG_MUX_SERDES0_IDDQ (0x1<<1)
@@ -4454,6 +4474,7 @@
4454#define HW_LOCK_RESOURCE_GPIO 1 4474#define HW_LOCK_RESOURCE_GPIO 1
4455#define HW_LOCK_RESOURCE_MDIO 0 4475#define HW_LOCK_RESOURCE_MDIO 0
4456#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3 4476#define HW_LOCK_RESOURCE_PORT0_ATT_MASK 3
4477#define HW_LOCK_RESOURCE_RESERVED_08 8
4457#define HW_LOCK_RESOURCE_SPIO 2 4478#define HW_LOCK_RESOURCE_SPIO 2
4458#define HW_LOCK_RESOURCE_UNDI 5 4479#define HW_LOCK_RESOURCE_UNDI 5
4459#define PRS_FLAG_OVERETH_IPV4 1 4480#define PRS_FLAG_OVERETH_IPV4 1
@@ -4474,6 +4495,10 @@
4474#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5) 4495#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 (1<<5)
4475#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9) 4496#define AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 (1<<9)
4476#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12) 4497#define AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR (1<<12)
4498#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY (1<<28)
4499#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY (1<<31)
4500#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY (1<<29)
4501#define AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY (1<<30)
4477#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15) 4502#define AEU_INPUTS_ATTN_BITS_MISC_HW_INTERRUPT (1<<15)
4478#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14) 4503#define AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR (1<<14)
4479#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20) 4504#define AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR (1<<20)
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 6dd64cf3cb76..969ffed86b9f 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -37,7 +37,6 @@
37static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr) 37static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
38{ 38{
39 struct inet6_dev *idev; 39 struct inet6_dev *idev;
40 struct inet6_ifaddr *ifa;
41 40
42 if (!dev) 41 if (!dev)
43 return; 42 return;
@@ -47,10 +46,12 @@ static void bond_glean_dev_ipv6(struct net_device *dev, struct in6_addr *addr)
47 return; 46 return;
48 47
49 read_lock_bh(&idev->lock); 48 read_lock_bh(&idev->lock);
50 ifa = idev->addr_list; 49 if (!list_empty(&idev->addr_list)) {
51 if (ifa) 50 struct inet6_ifaddr *ifa
51 = list_first_entry(&idev->addr_list,
52 struct inet6_ifaddr, if_list);
52 ipv6_addr_copy(addr, &ifa->addr); 53 ipv6_addr_copy(addr, &ifa->addr);
53 else 54 } else
54 ipv6_addr_set(addr, 0, 0, 0, 0); 55 ipv6_addr_set(addr, 0, 0, 0, 0);
55 56
56 read_unlock_bh(&idev->lock); 57 read_unlock_bh(&idev->lock);
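
The hunk above replaces the old NULL-terminated ifa chain with a standard list_head. The list_empty() test has to come first, since list_first_entry() is undefined on an empty list. A generic sketch of the idiom (item and first_value are hypothetical names):

	#include <linux/list.h>

	struct item {
		struct list_head node;
		int value;
	};

	static int first_value(struct list_head *head, int fallback)
	{
		if (list_empty(head))
			return fallback; /* list_first_entry would be bogus here */
		return list_first_entry(head, struct item, node)->value;
	}
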
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0075514bf32f..85e813c7762b 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -762,32 +762,6 @@ static int bond_check_dev_link(struct bonding *bond,
762/*----------------------------- Multicast list ------------------------------*/ 762/*----------------------------- Multicast list ------------------------------*/
763 763
764/* 764/*
765 * Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise
766 */
767static inline int bond_is_dmi_same(const struct dev_mc_list *dmi1,
768 const struct dev_mc_list *dmi2)
769{
770 return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 &&
771 dmi1->dmi_addrlen == dmi2->dmi_addrlen;
772}
773
774/*
775 * returns dmi entry if found, NULL otherwise
776 */
777static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi,
778 struct dev_mc_list *mc_list)
779{
780 struct dev_mc_list *idmi;
781
782 for (idmi = mc_list; idmi; idmi = idmi->next) {
783 if (bond_is_dmi_same(dmi, idmi))
784 return idmi;
785 }
786
787 return NULL;
788}
789
790/*
791 * Push the promiscuity flag down to appropriate slaves 765 * Push the promiscuity flag down to appropriate slaves
792 */ 766 */
793static int bond_set_promiscuity(struct bonding *bond, int inc) 767static int bond_set_promiscuity(struct bonding *bond, int inc)
@@ -839,18 +813,18 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
839 * Add a Multicast address to slaves 813 * Add a Multicast address to slaves
840 * according to mode 814 * according to mode
841 */ 815 */
842static void bond_mc_add(struct bonding *bond, void *addr, int alen) 816static void bond_mc_add(struct bonding *bond, void *addr)
843{ 817{
844 if (USES_PRIMARY(bond->params.mode)) { 818 if (USES_PRIMARY(bond->params.mode)) {
845 /* write lock already acquired */ 819 /* write lock already acquired */
846 if (bond->curr_active_slave) 820 if (bond->curr_active_slave)
847 dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0); 821 dev_mc_add(bond->curr_active_slave->dev, addr);
848 } else { 822 } else {
849 struct slave *slave; 823 struct slave *slave;
850 int i; 824 int i;
851 825
852 bond_for_each_slave(bond, slave, i) 826 bond_for_each_slave(bond, slave, i)
853 dev_mc_add(slave->dev, addr, alen, 0); 827 dev_mc_add(slave->dev, addr);
854 } 828 }
855} 829}
856 830
@@ -858,18 +832,17 @@ static void bond_mc_add(struct bonding *bond, void *addr, int alen)
858 * Remove a multicast address from slave 832 * Remove a multicast address from slave
859 * according to mode 833 * according to mode
860 */ 834 */
861static void bond_mc_delete(struct bonding *bond, void *addr, int alen) 835static void bond_mc_del(struct bonding *bond, void *addr)
862{ 836{
863 if (USES_PRIMARY(bond->params.mode)) { 837 if (USES_PRIMARY(bond->params.mode)) {
864 /* write lock already acquired */ 838 /* write lock already acquired */
865 if (bond->curr_active_slave) 839 if (bond->curr_active_slave)
866 dev_mc_delete(bond->curr_active_slave->dev, addr, 840 dev_mc_del(bond->curr_active_slave->dev, addr);
867 alen, 0);
868 } else { 841 } else {
869 struct slave *slave; 842 struct slave *slave;
870 int i; 843 int i;
871 bond_for_each_slave(bond, slave, i) { 844 bond_for_each_slave(bond, slave, i) {
872 dev_mc_delete(slave->dev, addr, alen, 0); 845 dev_mc_del(slave->dev, addr);
873 } 846 }
874 } 847 }
875} 848}
@@ -896,66 +869,22 @@ static void bond_resend_igmp_join_requests(struct bonding *bond)
896} 869}
897 870
898/* 871/*
899 * Totally destroys the mc_list in bond
900 */
901static void bond_mc_list_destroy(struct bonding *bond)
902{
903 struct dev_mc_list *dmi;
904
905 dmi = bond->mc_list;
906 while (dmi) {
907 bond->mc_list = dmi->next;
908 kfree(dmi);
909 dmi = bond->mc_list;
910 }
911
912 bond->mc_list = NULL;
913}
914
915/*
916 * Copy all the Multicast addresses from src to the bonding device dst
917 */
918static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
919 gfp_t gfp_flag)
920{
921 struct dev_mc_list *dmi, *new_dmi;
922
923 for (dmi = mc_list; dmi; dmi = dmi->next) {
924 new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
925
926 if (!new_dmi) {
927 /* FIXME: Potential memory leak !!! */
928 return -ENOMEM;
929 }
930
931 new_dmi->next = bond->mc_list;
932 bond->mc_list = new_dmi;
933 new_dmi->dmi_addrlen = dmi->dmi_addrlen;
934 memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen);
935 new_dmi->dmi_users = dmi->dmi_users;
936 new_dmi->dmi_gusers = dmi->dmi_gusers;
937 }
938
939 return 0;
940}
941
942/*
943 * flush all members of flush->mc_list from device dev->mc_list 872 * flush all members of flush->mc_list from device dev->mc_list
944 */ 873 */
945static void bond_mc_list_flush(struct net_device *bond_dev, 874static void bond_mc_list_flush(struct net_device *bond_dev,
946 struct net_device *slave_dev) 875 struct net_device *slave_dev)
947{ 876{
948 struct bonding *bond = netdev_priv(bond_dev); 877 struct bonding *bond = netdev_priv(bond_dev);
949 struct dev_mc_list *dmi; 878 struct netdev_hw_addr *ha;
950 879
951 for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) 880 netdev_for_each_mc_addr(ha, bond_dev)
952 dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0); 881 dev_mc_del(slave_dev, ha->addr);
953 882
954 if (bond->params.mode == BOND_MODE_8023AD) { 883 if (bond->params.mode == BOND_MODE_8023AD) {
955 /* del lacpdu mc addr from mc list */ 884 /* del lacpdu mc addr from mc list */
956 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; 885 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
957 886
958 dev_mc_delete(slave_dev, lacpdu_multicast, ETH_ALEN, 0); 887 dev_mc_del(slave_dev, lacpdu_multicast);
959 } 888 }
960} 889}
961 890
@@ -969,7 +898,7 @@ static void bond_mc_list_flush(struct net_device *bond_dev,
969static void bond_mc_swap(struct bonding *bond, struct slave *new_active, 898static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
970 struct slave *old_active) 899 struct slave *old_active)
971{ 900{
972 struct dev_mc_list *dmi; 901 struct netdev_hw_addr *ha;
973 902
974 if (!USES_PRIMARY(bond->params.mode)) 903 if (!USES_PRIMARY(bond->params.mode))
975 /* nothing to do - mc list is already up-to-date on 904 /* nothing to do - mc list is already up-to-date on
@@ -984,9 +913,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
984 if (bond->dev->flags & IFF_ALLMULTI) 913 if (bond->dev->flags & IFF_ALLMULTI)
985 dev_set_allmulti(old_active->dev, -1); 914 dev_set_allmulti(old_active->dev, -1);
986 915
987 for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) 916 netdev_for_each_mc_addr(ha, bond->dev)
988 dev_mc_delete(old_active->dev, dmi->dmi_addr, 917 dev_mc_del(old_active->dev, ha->addr);
989 dmi->dmi_addrlen, 0);
990 } 918 }
991 919
992 if (new_active) { 920 if (new_active) {
@@ -997,9 +925,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
997 if (bond->dev->flags & IFF_ALLMULTI) 925 if (bond->dev->flags & IFF_ALLMULTI)
998 dev_set_allmulti(new_active->dev, 1); 926 dev_set_allmulti(new_active->dev, 1);
999 927
1000 for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next) 928 netdev_for_each_mc_addr(ha, bond->dev)
1001 dev_mc_add(new_active->dev, dmi->dmi_addr, 929 dev_mc_add(new_active->dev, ha->addr);
1002 dmi->dmi_addrlen, 0);
1003 bond_resend_igmp_join_requests(bond); 930 bond_resend_igmp_join_requests(bond);
1004 } 931 }
1005} 932}
@@ -1411,7 +1338,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1411 struct bonding *bond = netdev_priv(bond_dev); 1338 struct bonding *bond = netdev_priv(bond_dev);
1412 const struct net_device_ops *slave_ops = slave_dev->netdev_ops; 1339 const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
1413 struct slave *new_slave = NULL; 1340 struct slave *new_slave = NULL;
1414 struct dev_mc_list *dmi; 1341 struct netdev_hw_addr *ha;
1415 struct sockaddr addr; 1342 struct sockaddr addr;
1416 int link_reporting; 1343 int link_reporting;
1417 int old_features = bond_dev->features; 1344 int old_features = bond_dev->features;
@@ -1485,14 +1412,27 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1485 bond_dev->name, 1412 bond_dev->name,
1486 bond_dev->type, slave_dev->type); 1413 bond_dev->type, slave_dev->type);
1487 1414
1488 netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE); 1415 res = netdev_bonding_change(bond_dev,
1416 NETDEV_PRE_TYPE_CHANGE);
1417 res = notifier_to_errno(res);
1418 if (res) {
1419 pr_err("%s: refused to change device type\n",
1420 bond_dev->name);
1421 res = -EBUSY;
1422 goto err_undo_flags;
1423 }
1424
1425 /* Flush unicast and multicast addresses */
1426 dev_uc_flush(bond_dev);
1427 dev_mc_flush(bond_dev);
1489 1428
1490 if (slave_dev->type != ARPHRD_ETHER) 1429 if (slave_dev->type != ARPHRD_ETHER)
1491 bond_setup_by_slave(bond_dev, slave_dev); 1430 bond_setup_by_slave(bond_dev, slave_dev);
1492 else 1431 else
1493 ether_setup(bond_dev); 1432 ether_setup(bond_dev);
1494 1433
1495 netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE); 1434 netdev_bonding_change(bond_dev,
1435 NETDEV_POST_TYPE_CHANGE);
1496 } 1436 }
1497 } else if (bond_dev->type != slave_dev->type) { 1437 } else if (bond_dev->type != slave_dev->type) {
1498 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n", 1438 pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
@@ -1593,9 +1533,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1593 1533
1594 netif_addr_lock_bh(bond_dev); 1534 netif_addr_lock_bh(bond_dev);
1595 /* upload master's mc_list to new slave */ 1535 /* upload master's mc_list to new slave */
1596 for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) 1536 netdev_for_each_mc_addr(ha, bond_dev)
1597 dev_mc_add(slave_dev, dmi->dmi_addr, 1537 dev_mc_add(slave_dev, ha->addr);
1598 dmi->dmi_addrlen, 0);
1599 netif_addr_unlock_bh(bond_dev); 1538 netif_addr_unlock_bh(bond_dev);
1600 } 1539 }
1601 1540
@@ -1603,7 +1542,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1603 /* add lacpdu mc addr to mc list */ 1542 /* add lacpdu mc addr to mc list */
1604 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR; 1543 u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
1605 1544
1606 dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0); 1545 dev_mc_add(slave_dev, lacpdu_multicast);
1607 } 1546 }
1608 1547
1609 bond_add_vlans_on_slave(bond, slave_dev); 1548 bond_add_vlans_on_slave(bond, slave_dev);
@@ -3905,10 +3844,24 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3905 return res; 3844 return res;
3906} 3845}
3907 3846
3847static bool bond_addr_in_mc_list(unsigned char *addr,
3848 struct netdev_hw_addr_list *list,
3849 int addrlen)
3850{
3851 struct netdev_hw_addr *ha;
3852
3853 netdev_hw_addr_list_for_each(ha, list)
3854 if (!memcmp(ha->addr, addr, addrlen))
3855 return true;
3856
3857 return false;
3858}
3859
3908static void bond_set_multicast_list(struct net_device *bond_dev) 3860static void bond_set_multicast_list(struct net_device *bond_dev)
3909{ 3861{
3910 struct bonding *bond = netdev_priv(bond_dev); 3862 struct bonding *bond = netdev_priv(bond_dev);
3911 struct dev_mc_list *dmi; 3863 struct netdev_hw_addr *ha;
3864 bool found;
3912 3865
3913 /* 3866 /*
3914 * Do promisc before checking multicast_mode 3867 * Do promisc before checking multicast_mode
@@ -3943,20 +3896,25 @@ static void bond_set_multicast_list(struct net_device *bond_dev)
3943 bond->flags = bond_dev->flags; 3896 bond->flags = bond_dev->flags;
3944 3897
3945 /* looking for addresses to add to slaves' mc list */ 3898 /* looking for addresses to add to slaves' mc list */
3946 for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) { 3899 netdev_for_each_mc_addr(ha, bond_dev) {
3947 if (!bond_mc_list_find_dmi(dmi, bond->mc_list)) 3900 found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
3948 bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen); 3901 bond_dev->addr_len);
3902 if (!found)
3903 bond_mc_add(bond, ha->addr);
3949 } 3904 }
3950 3905
3951 /* looking for addresses to delete from slaves' list */ 3906 /* looking for addresses to delete from slaves' list */
3952 for (dmi = bond->mc_list; dmi; dmi = dmi->next) { 3907 netdev_hw_addr_list_for_each(ha, &bond->mc_list) {
3953 if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list)) 3908 found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc,
3954 bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen); 3909 bond_dev->addr_len);
3910 if (!found)
3911 bond_mc_del(bond, ha->addr);
3955 } 3912 }
3956 3913
3957 /* save master's multicast list */ 3914 /* save master's multicast list */
3958 bond_mc_list_destroy(bond); 3915 __hw_addr_flush(&bond->mc_list);
3959 bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC); 3916 __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc,
3917 bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST);
3960 3918
3961 read_unlock(&bond->lock); 3919 read_unlock(&bond->lock);
3962} 3920}
@@ -4550,9 +4508,7 @@ static void bond_uninit(struct net_device *bond_dev)
4550 4508
4551 bond_remove_proc_entry(bond); 4509 bond_remove_proc_entry(bond);
4552 4510
4553 netif_addr_lock_bh(bond_dev); 4511 __hw_addr_flush(&bond->mc_list);
4554 bond_mc_list_destroy(bond);
4555 netif_addr_unlock_bh(bond_dev);
4556} 4512}
4557 4513
4558/*------------------------- Module initialization ---------------------------*/ 4514/*------------------------- Module initialization ---------------------------*/
@@ -4683,13 +4639,13 @@ static int bond_check_params(struct bond_params *params)
4683 } 4639 }
4684 4640
4685 if (num_grat_arp < 0 || num_grat_arp > 255) { 4641 if (num_grat_arp < 0 || num_grat_arp > 255) {
4686 pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n", 4642 pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n",
4687 num_grat_arp); 4643 num_grat_arp);
4688 num_grat_arp = 1; 4644 num_grat_arp = 1;
4689 } 4645 }
4690 4646
4691 if (num_unsol_na < 0 || num_unsol_na > 255) { 4647 if (num_unsol_na < 0 || num_unsol_na > 255) {
4692 pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n", 4648 pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
4693 num_unsol_na); 4649 num_unsol_na);
4694 num_unsol_na = 1; 4650 num_unsol_na = 1;
4695 } 4651 }
@@ -4924,6 +4880,8 @@ static int bond_init(struct net_device *bond_dev)
4924 list_add_tail(&bond->bond_list, &bn->dev_list); 4880 list_add_tail(&bond->bond_list, &bn->dev_list);
4925 4881
4926 bond_prepare_sysfs_group(bond); 4882 bond_prepare_sysfs_group(bond);
4883
4884 __hw_addr_init(&bond->mc_list);
4927 return 0; 4885 return 0;
4928} 4886}
4929 4887
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 257a7a4dfce9..2aa336720591 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -202,7 +202,7 @@ struct bonding {
202 char proc_file_name[IFNAMSIZ]; 202 char proc_file_name[IFNAMSIZ];
203#endif /* CONFIG_PROC_FS */ 203#endif /* CONFIG_PROC_FS */
204 struct list_head bond_list; 204 struct list_head bond_list;
205 struct dev_mc_list *mc_list; 205 struct netdev_hw_addr_list mc_list;
206 int (*xmit_hash_policy)(struct sk_buff *, int); 206 int (*xmit_hash_policy)(struct sk_buff *, int);
207 __be32 master_ip; 207 __be32 master_ip;
208 u16 flags; 208 u16 flags;
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
new file mode 100644
index 000000000000..0b28e0107697
--- /dev/null
+++ b/drivers/net/caif/Kconfig
@@ -0,0 +1,17 @@
1#
2# CAIF physical drivers
3#
4
5if CAIF
6
7comment "CAIF transport drivers"
8
9config CAIF_TTY
10 tristate "CAIF TTY transport driver"
11 default n
12 ---help---
13 The CAIF TTY transport driver is a Line Discipline (ldisc)
14 identified as N_CAIF. When this ldisc is opened from user space
15 it will redirect the TTY's traffic into the CAIF stack.
16
17endif # CAIF
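
The help text says the ldisc is opened from user space; the usual mechanism is the TIOCSETD ioctl on an open TTY, as in this hedged user-space sketch (the numeric value of N_CAIF is an assumption; check the linux/tty.h shipped with this series):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	int main(void)
	{
		int ldisc = 20;	/* assumed N_CAIF value, see linux/tty.h */
		int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);

		if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
			perror("attach N_CAIF");
			return 1;
		}
		pause();	/* TTY traffic feeds the CAIF stack until fd closes */
		return 0;
	}
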
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
new file mode 100644
index 000000000000..52b6d1f826f8
--- /dev/null
+++ b/drivers/net/caif/Makefile
@@ -0,0 +1,12 @@
1ifeq ($(CONFIG_CAIF_DEBUG),1)
2CAIF_DBG_FLAGS := -DDEBUG
3endif
4
5KBUILD_EXTRA_SYMBOLS=net/caif/Module.symvers
6
7ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
 8clean-dirs := .tmp_versions
 9clean-files := Module.symvers modules.order *.cmd *~
10
11# Serial interface
12obj-$(CONFIG_CAIF_TTY) += caif_serial.o
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
new file mode 100644
index 000000000000..38c0186cfbc2
--- /dev/null
+++ b/drivers/net/caif/caif_serial.c
@@ -0,0 +1,446 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/init.h>
8#include <linux/version.h>
9#include <linux/module.h>
10#include <linux/device.h>
11#include <linux/types.h>
12#include <linux/skbuff.h>
13#include <linux/netdevice.h>
14#include <linux/rtnetlink.h>
15#include <linux/tty.h>
16#include <linux/file.h>
17#include <linux/if_arp.h>
18#include <net/caif/caif_device.h>
19#include <net/caif/cfcnfg.h>
20#include <linux/err.h>
21#include <linux/debugfs.h>
22
23MODULE_LICENSE("GPL");
24MODULE_AUTHOR("Sjur Brendeland<sjur.brandeland@stericsson.com>");
25MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
27MODULE_ALIAS_LDISC(N_CAIF);
28
29#define SEND_QUEUE_LOW 10
30#define SEND_QUEUE_HIGH 100
 31#define CAIF_SENDING 1 /* Bit 1 = 0x02 */
32#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
33#define MAX_WRITE_CHUNK 4096
34#define ON 1
35#define OFF 0
36#define CAIF_MAX_MTU 4096
37
 38/* This list is protected by the rtnl lock. */
39static LIST_HEAD(ser_list);
40
41static int ser_loop;
42module_param(ser_loop, bool, S_IRUGO);
43MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
44
45static int ser_use_stx = 1;
46module_param(ser_use_stx, bool, S_IRUGO);
47MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
48
49static int ser_use_fcs = 1;
50
51module_param(ser_use_fcs, bool, S_IRUGO);
52MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
53
54static int ser_write_chunk = MAX_WRITE_CHUNK;
55module_param(ser_write_chunk, int, S_IRUGO);
56
57MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
58
59static struct dentry *debugfsdir;
60
61static int caif_net_open(struct net_device *dev);
62static int caif_net_close(struct net_device *dev);
63
64struct ser_device {
65 struct caif_dev_common common;
66 struct list_head node;
67 struct net_device *dev;
68 struct sk_buff_head head;
69 struct tty_struct *tty;
70 bool tx_started;
71 unsigned long state;
72 char *tty_name;
73#ifdef CONFIG_DEBUG_FS
74 struct dentry *debugfs_tty_dir;
75 struct debugfs_blob_wrapper tx_blob;
76 struct debugfs_blob_wrapper rx_blob;
77 u8 rx_data[128];
78 u8 tx_data[128];
79 u8 tty_status;
80
81#endif
82};
83
84static void caifdev_setup(struct net_device *dev);
85static void ldisc_tx_wakeup(struct tty_struct *tty);
86#ifdef CONFIG_DEBUG_FS
87static inline void update_tty_status(struct ser_device *ser)
88{
89 ser->tty_status =
90 ser->tty->stopped << 5 |
91 ser->tty->hw_stopped << 4 |
92 ser->tty->flow_stopped << 3 |
93 ser->tty->packet << 2 |
94 ser->tty->low_latency << 1 |
95 ser->tty->warned;
96}
97static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
98{
99 ser->debugfs_tty_dir =
100 debugfs_create_dir(tty->name, debugfsdir);
101 if (!IS_ERR(ser->debugfs_tty_dir)) {
102 debugfs_create_blob("last_tx_msg", S_IRUSR,
103 ser->debugfs_tty_dir,
104 &ser->tx_blob);
105
106 debugfs_create_blob("last_rx_msg", S_IRUSR,
107 ser->debugfs_tty_dir,
108 &ser->rx_blob);
109
110 debugfs_create_x32("ser_state", S_IRUSR,
111 ser->debugfs_tty_dir,
112 (u32 *)&ser->state);
113
114 debugfs_create_x8("tty_status", S_IRUSR,
115 ser->debugfs_tty_dir,
116 &ser->tty_status);
117
118 }
119 ser->tx_blob.data = ser->tx_data;
120 ser->tx_blob.size = 0;
121 ser->rx_blob.data = ser->rx_data;
122 ser->rx_blob.size = 0;
123}
124
125static inline void debugfs_deinit(struct ser_device *ser)
126{
127 debugfs_remove_recursive(ser->debugfs_tty_dir);
128}
129
130static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
131{
132 if (size > sizeof(ser->rx_data))
133 size = sizeof(ser->rx_data);
134 memcpy(ser->rx_data, data, size);
135 ser->rx_blob.data = ser->rx_data;
136 ser->rx_blob.size = size;
137}
138
139static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
140{
141 if (size > sizeof(ser->tx_data))
142 size = sizeof(ser->tx_data);
143 memcpy(ser->tx_data, data, size);
144 ser->tx_blob.data = ser->tx_data;
145 ser->tx_blob.size = size;
146}
147#else
148static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
149{
150}
151
152static inline void debugfs_deinit(struct ser_device *ser)
153{
154}
155
156static inline void update_tty_status(struct ser_device *ser)
157{
158}
159
160static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
161{
162}
163
164static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
165{
166}
167
168#endif
169
170static void ldisc_receive(struct tty_struct *tty, const u8 *data,
171 char *flags, int count)
172{
173 struct sk_buff *skb = NULL;
174 struct ser_device *ser;
175 int ret;
176 u8 *p;
177 ser = tty->disc_data;
178
179 /*
180 * NOTE: flags may contain information about break or overrun.
181 * This is not yet handled.
182 */
183
184
185 /*
186 * Workaround for garbage at start of transmission,
187 * only enable if STX handling is not enabled.
188 */
189 if (!ser->common.use_stx && !ser->tx_started) {
190 dev_info(&ser->dev->dev,
191 "Bytes received before initial transmission -"
192 "bytes discarded.\n");
193 return;
194 }
195
196 BUG_ON(ser->dev == NULL);
197
198 /* Get a suitable caif packet and copy in data. */
199 skb = netdev_alloc_skb(ser->dev, count+1);
200 BUG_ON(skb == NULL);
201 p = skb_put(skb, count);
202 memcpy(p, data, count);
203
204 skb->protocol = htons(ETH_P_CAIF);
205 skb_reset_mac_header(skb);
206 skb->dev = ser->dev;
207 debugfs_rx(ser, data, count);
208 /* Push received packet up the stack. */
209 ret = netif_rx_ni(skb);
210 if (!ret) {
211 ser->dev->stats.rx_packets++;
212 ser->dev->stats.rx_bytes += count;
213 } else
214 ++ser->dev->stats.rx_dropped;
215 update_tty_status(ser);
216}
217
218static int handle_tx(struct ser_device *ser)
219{
220 struct tty_struct *tty;
221 struct sk_buff *skb;
222 int tty_wr, len, room;
223 tty = ser->tty;
224 ser->tx_started = true;
225
226 /* Enter critical section */
227 if (test_and_set_bit(CAIF_SENDING, &ser->state))
228 return 0;
229
230 /* skb_peek is safe because handle_tx is called after skb_queue_tail */
231 while ((skb = skb_peek(&ser->head)) != NULL) {
232
233 /* Make sure you don't write too much */
234 len = skb->len;
235 room = tty_write_room(tty);
236 if (!room)
237 break;
238 if (room > ser_write_chunk)
239 room = ser_write_chunk;
240 if (len > room)
241 len = room;
242
243 /* Write to tty or loopback */
244 if (!ser_loop) {
245 tty_wr = tty->ops->write(tty, skb->data, len);
246 update_tty_status(ser);
247 } else {
248 tty_wr = len;
249 ldisc_receive(tty, skb->data, NULL, len);
250 }
251 ser->dev->stats.tx_packets++;
252 ser->dev->stats.tx_bytes += tty_wr;
253
254 /* Error on TTY ?! */
255 if (tty_wr < 0)
256 goto error;
257 /* Reduce buffer written, and discard if empty */
258 skb_pull(skb, tty_wr);
259 if (skb->len == 0) {
260 struct sk_buff *tmp = skb_dequeue(&ser->head);
261 BUG_ON(tmp != skb);
262 if (in_interrupt())
263 dev_kfree_skb_irq(skb);
264 else
265 kfree_skb(skb);
266 }
267 }
268 /* Send flow off if queue is empty */
269 if (ser->head.qlen <= SEND_QUEUE_LOW &&
270 test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
271 ser->common.flowctrl != NULL)
272 ser->common.flowctrl(ser->dev, ON);
273 clear_bit(CAIF_SENDING, &ser->state);
274 return 0;
275error:
276 clear_bit(CAIF_SENDING, &ser->state);
277 return tty_wr;
278}
279
280static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
281{
282 struct ser_device *ser;
283 BUG_ON(dev == NULL);
284 ser = netdev_priv(dev);
285
286 /* Send flow off once, on high water mark */
287 if (ser->head.qlen > SEND_QUEUE_HIGH &&
288 !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
289 ser->common.flowctrl != NULL)
290
291 ser->common.flowctrl(ser->dev, OFF);
292
293 skb_queue_tail(&ser->head, skb);
294 return handle_tx(ser);
295}
296
297
298static void ldisc_tx_wakeup(struct tty_struct *tty)
299{
300 struct ser_device *ser;
301 ser = tty->disc_data;
302 BUG_ON(ser == NULL);
303 BUG_ON(ser->tty != tty);
304 handle_tx(ser);
305}
306
307
308static int ldisc_open(struct tty_struct *tty)
309{
310 struct ser_device *ser;
311 struct net_device *dev;
312 char name[64];
313 int result;
314
315 /* No write no play */
316 if (tty->ops->write == NULL)
317 return -EOPNOTSUPP;
318
319 sprintf(name, "cf%s", tty->name);
320 dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
321 ser = netdev_priv(dev);
322 ser->tty = tty_kref_get(tty);
323 ser->dev = dev;
324 debugfs_init(ser, tty);
325 tty->receive_room = N_TTY_BUF_SIZE;
326 tty->disc_data = ser;
327 set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
328 rtnl_lock();
329 result = register_netdevice(dev);
330 if (result) {
331 rtnl_unlock();
332 free_netdev(dev);
333 return -ENODEV;
334 }
335
336 list_add(&ser->node, &ser_list);
337 rtnl_unlock();
338 netif_stop_queue(dev);
339 update_tty_status(ser);
340 return 0;
341}
342
343static void ldisc_close(struct tty_struct *tty)
344{
345 struct ser_device *ser = tty->disc_data;
346 /* Remove may be called inside or outside of rtnl_lock */
347 int islocked = rtnl_is_locked();
348 if (!islocked)
349 rtnl_lock();
350 /* device is freed automagically by net-sysfs */
351 dev_close(ser->dev);
352 unregister_netdevice(ser->dev);
353 list_del(&ser->node);
354 debugfs_deinit(ser);
355 tty_kref_put(ser->tty);
356 if (!islocked)
357 rtnl_unlock();
358}
359
360/* The line discipline structure. */
361static struct tty_ldisc_ops caif_ldisc = {
362 .owner = THIS_MODULE,
363 .magic = TTY_LDISC_MAGIC,
364 .name = "n_caif",
365 .open = ldisc_open,
366 .close = ldisc_close,
367 .receive_buf = ldisc_receive,
368 .write_wakeup = ldisc_tx_wakeup
369};
370
371static int register_ldisc(void)
372{
373 int result;
374 result = tty_register_ldisc(N_CAIF, &caif_ldisc);
375 if (result < 0) {
376 pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
377 result);
378 return result;
379 }
380 return result;
381}
382static const struct net_device_ops netdev_ops = {
383 .ndo_open = caif_net_open,
384 .ndo_stop = caif_net_close,
385 .ndo_start_xmit = caif_xmit
386};
387
388static void caifdev_setup(struct net_device *dev)
389{
390 struct ser_device *serdev = netdev_priv(dev);
391 dev->features = 0;
392 dev->netdev_ops = &netdev_ops;
393 dev->type = ARPHRD_CAIF;
394 dev->flags = IFF_POINTOPOINT | IFF_NOARP;
395 dev->mtu = CAIF_MAX_MTU;
396 dev->hard_header_len = CAIF_NEEDED_HEADROOM;
397 dev->tx_queue_len = 0;
398 dev->destructor = free_netdev;
399 skb_queue_head_init(&serdev->head);
400 serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
401 serdev->common.use_frag = true;
402 serdev->common.use_stx = ser_use_stx;
403 serdev->common.use_fcs = ser_use_fcs;
404 serdev->dev = dev;
405}
406
407
408static int caif_net_open(struct net_device *dev)
409{
410 struct ser_device *ser;
411 ser = netdev_priv(dev);
412 netif_wake_queue(dev);
413 return 0;
414}
415
416static int caif_net_close(struct net_device *dev)
417{
418 netif_stop_queue(dev);
419 return 0;
420}
421
422static int __init caif_ser_init(void)
423{
424 int ret;
425 ret = register_ldisc();
426 debugfsdir = debugfs_create_dir("caif_serial", NULL);
427 return ret;
428}
429
430static void __exit caif_ser_exit(void)
431{
432 struct ser_device *ser = NULL;
433 struct list_head *node;
434 struct list_head *_tmp;
435 list_for_each_safe(node, _tmp, &ser_list) {
436 ser = list_entry(node, struct ser_device, node);
437 dev_close(ser->dev);
438 unregister_netdevice(ser->dev);
439 list_del(node);
440 }
441 tty_unregister_ldisc(N_CAIF);
442 debugfs_remove_recursive(debugfsdir);
443}
444
445module_init(caif_ser_init);
446module_exit(caif_ser_exit);
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index a2f29a38798a..5f983487d6e4 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -35,7 +35,6 @@
35#include <linux/string.h> 35#include <linux/string.h>
36#include <linux/types.h> 36#include <linux/types.h>
37 37
38#include <linux/can.h>
39#include <linux/can/dev.h> 38#include <linux/can/dev.h>
40#include <linux/can/error.h> 39#include <linux/can/error.h>
41 40
@@ -662,7 +661,6 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
662 at91_poll_err_frame(dev, cf, reg_sr); 661 at91_poll_err_frame(dev, cf, reg_sr);
663 netif_receive_skb(skb); 662 netif_receive_skb(skb);
664 663
665 dev->last_rx = jiffies;
666 dev->stats.rx_packets++; 664 dev->stats.rx_packets++;
667 dev->stats.rx_bytes += cf->can_dlc; 665 dev->stats.rx_bytes += cf->can_dlc;
668 666
@@ -899,7 +897,6 @@ static void at91_irq_err(struct net_device *dev)
899 at91_irq_err_state(dev, cf, new_state); 897 at91_irq_err_state(dev, cf, new_state);
900 netif_rx(skb); 898 netif_rx(skb);
901 899
902 dev->last_rx = jiffies;
903 dev->stats.rx_packets++; 900 dev->stats.rx_packets++;
904 dev->stats.rx_bytes += cf->can_dlc; 901 dev->stats.rx_bytes += cf->can_dlc;
905 902
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 03489864376d..d77264ad326d 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -18,7 +18,6 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20 20
21#include <linux/can.h>
22#include <linux/can/dev.h> 21#include <linux/can/dev.h>
23#include <linux/can/error.h> 22#include <linux/can/error.h>
24 23
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index b39b108318b4..8431eb08075d 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -58,7 +58,6 @@
58 * 58 *
59 */ 59 */
60 60
61#include <linux/can.h>
62#include <linux/can/core.h> 61#include <linux/can/core.h>
63#include <linux/can/dev.h> 62#include <linux/can/dev.h>
64#include <linux/can/platform/mcp251x.h> 63#include <linux/can/platform/mcp251x.h>
@@ -923,12 +922,16 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
923 struct net_device *net; 922 struct net_device *net;
924 struct mcp251x_priv *priv; 923 struct mcp251x_priv *priv;
925 struct mcp251x_platform_data *pdata = spi->dev.platform_data; 924 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
925 int model = spi_get_device_id(spi)->driver_data;
926 int ret = -ENODEV; 926 int ret = -ENODEV;
927 927
928 if (!pdata) 928 if (!pdata)
929 /* Platform data is required for osc freq */ 929 /* Platform data is required for osc freq */
930 goto error_out; 930 goto error_out;
931 931
932 if (model)
933 pdata->model = model;
934
932 /* Allocate can/net device */ 935 /* Allocate can/net device */
933 net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX); 936 net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
934 if (!net) { 937 if (!net) {
@@ -1118,6 +1121,15 @@ static int mcp251x_can_resume(struct spi_device *spi)
1118#define mcp251x_can_resume NULL 1121#define mcp251x_can_resume NULL
1119#endif 1122#endif
1120 1123
1124static struct spi_device_id mcp251x_id_table[] = {
1125 { "mcp251x", 0 /* Use pdata.model */ },
1126 { "mcp2510", CAN_MCP251X_MCP2510 },
1127 { "mcp2515", CAN_MCP251X_MCP2515 },
1128 { },
1129};
1130
1131MODULE_DEVICE_TABLE(spi, mcp251x_id_table);
1132
1121static struct spi_driver mcp251x_can_driver = { 1133static struct spi_driver mcp251x_can_driver = {
1122 .driver = { 1134 .driver = {
1123 .name = DEVICE_NAME, 1135 .name = DEVICE_NAME,
@@ -1125,6 +1137,7 @@ static struct spi_driver mcp251x_can_driver = {
1125 .owner = THIS_MODULE, 1137 .owner = THIS_MODULE,
1126 }, 1138 },
1127 1139
1140 .id_table = mcp251x_id_table,
1128 .probe = mcp251x_can_probe, 1141 .probe = mcp251x_can_probe,
1129 .remove = __devexit_p(mcp251x_can_remove), 1142 .remove = __devexit_p(mcp251x_can_remove),
1130 .suspend = mcp251x_can_suspend, 1143 .suspend = mcp251x_can_suspend,
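
With the SPI id table in place, board code can select the exact chip through the device's modalias rather than through platform data alone; a hedged board-file sketch (bus number and chip select are illustrative, and platform data is still required for the oscillator frequency):

	#include <linux/spi/spi.h>

	static struct spi_board_info board_spi_devs[] __initdata = {
		{
			.modalias	= "mcp2515",	/* matched against mcp251x_id_table */
			.max_speed_hz	= 10000000,
			.bus_num	= 1,
			.chip_select	= 0,
			/* .platform_data = &my_mcp251x_pdata, (still needed for osc freq) */
		},
	};

	/* Registered from board init code:
	 * spi_register_board_info(board_spi_devs, ARRAY_SIZE(board_spi_devs)); */
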
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 03e7c48465a2..225fd147774a 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -25,7 +25,6 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/netdevice.h> 27#include <linux/netdevice.h>
28#include <linux/can.h>
29#include <linux/can/dev.h> 28#include <linux/can/dev.h>
30#include <linux/of_platform.h> 29#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h> 30#include <sysdev/fsl_soc.h>
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 6b7dd578d417..64c378cd0c34 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -28,7 +28,6 @@
28#include <linux/if_arp.h> 28#include <linux/if_arp.h>
29#include <linux/if_ether.h> 29#include <linux/if_ether.h>
30#include <linux/list.h> 30#include <linux/list.h>
31#include <linux/can.h>
32#include <linux/can/dev.h> 31#include <linux/can/dev.h>
33#include <linux/can/error.h> 32#include <linux/can/error.h>
34#include <linux/io.h> 33#include <linux/io.h>
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 9e277d64a318..ae3505afd682 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -53,7 +53,9 @@ config CAN_PLX_PCI
53 Driver supports now: 53 Driver supports now:
54 - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/) 54 - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
55 - Adlink PCI-7841/cPCI-7841 SE card 55 - Adlink PCI-7841/cPCI-7841 SE card
56 - esd CAN-PCI/CPCI/PCI104/200 (http://www.esd.eu/)
57 - esd CAN-PCI/PMC/266
58 - esd CAN-PCIe/2000
56 - Marathon CAN-bus-PCI card (http://www.marathon.ru/) 59 - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
57 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/) 60 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
58
59endif 61endif
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index 5f53da0bc40c..36f4f9780c30 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -24,7 +24,6 @@
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/can.h>
28#include <linux/can/dev.h> 27#include <linux/can/dev.h>
29#include <linux/io.h> 28#include <linux/io.h>
30 29
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 441e776a7f59..ed004cebd31f 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -36,7 +36,6 @@
36#include <linux/netdevice.h> 36#include <linux/netdevice.h>
37#include <linux/delay.h> 37#include <linux/delay.h>
38#include <linux/pci.h> 38#include <linux/pci.h>
39#include <linux/can.h>
40#include <linux/can/dev.h> 39#include <linux/can/dev.h>
41#include <linux/io.h> 40#include <linux/io.h>
42 41
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 4aff4070db96..437b5c716a24 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -27,7 +27,6 @@
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/pci.h> 29#include <linux/pci.h>
30#include <linux/can.h>
31#include <linux/can/dev.h> 30#include <linux/can/dev.h>
32#include <linux/io.h> 31#include <linux/io.h>
33 32
@@ -41,7 +40,10 @@ MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
41MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, " 40MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
42 "Adlink PCI-7841/cPCI-7841 SE, " 41 "Adlink PCI-7841/cPCI-7841 SE, "
43 "Marathon CAN-bus-PCI, " 42 "Marathon CAN-bus-PCI, "
44 "TEWS TECHNOLOGIES TPMC810"); 43 "TEWS TECHNOLOGIES TPMC810, "
44 "esd CAN-PCI/CPCI/PCI104/200, "
45 "esd CAN-PCI/PMC/266, "
46 "esd CAN-PCIe/2000")
45MODULE_LICENSE("GPL v2"); 47MODULE_LICENSE("GPL v2");
46 48
47#define PLX_PCI_MAX_CHAN 2 49#define PLX_PCI_MAX_CHAN 2
@@ -50,11 +52,14 @@ struct plx_pci_card {
50 int channels; /* detected channels count */ 52 int channels; /* detected channels count */
51 struct net_device *net_dev[PLX_PCI_MAX_CHAN]; 53 struct net_device *net_dev[PLX_PCI_MAX_CHAN];
52 void __iomem *conf_addr; 54 void __iomem *conf_addr;
55
56 /* Pointer to device-dependent reset function */
57 void (*reset_func)(struct pci_dev *pdev);
53}; 58};
54 59
55#define PLX_PCI_CAN_CLOCK (16000000 / 2) 60#define PLX_PCI_CAN_CLOCK (16000000 / 2)
56 61
57/* PLX90xx registers */ 62/* PLX9030/9050/9052 registers */
58#define PLX_INTCSR 0x4c /* Interrupt Control/Status */ 63#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
59#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response, 64#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
60 * Serial EEPROM, and Initialization 65 * Serial EEPROM, and Initialization
@@ -66,6 +71,14 @@ struct plx_pci_card {
66#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */ 71#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
67#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */ 72#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
68 73
74/* PLX9056 registers */
75#define PLX9056_INTCSR 0x68 /* Interrupt Control/Status */
76#define PLX9056_CNTRL 0x6c /* Control / Software Reset */
77
78#define PLX9056_LINTI (1 << 11)
79#define PLX9056_PCI_INT_EN (1 << 8)
80#define PLX9056_PCI_RCR (1 << 29) /* Read Configuration Registers */
81
69/* 82/*
70 * The board configuration is probably following: 83 * The board configuration is probably following:
71 * RX1 is connected to ground. 84 * RX1 is connected to ground.
@@ -101,6 +114,13 @@ struct plx_pci_card {
101#define ADLINK_PCI_VENDOR_ID 0x144A 114#define ADLINK_PCI_VENDOR_ID 0x144A
102#define ADLINK_PCI_DEVICE_ID 0x7841 115#define ADLINK_PCI_DEVICE_ID 0x7841
103 116
117#define ESD_PCI_SUB_SYS_ID_PCI200 0x0004
118#define ESD_PCI_SUB_SYS_ID_PCI266 0x0009
119#define ESD_PCI_SUB_SYS_ID_PMC266 0x000e
120#define ESD_PCI_SUB_SYS_ID_CPCI200 0x010b
121#define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200
122#define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501
123
104#define MARATHON_PCI_DEVICE_ID 0x2715 124#define MARATHON_PCI_DEVICE_ID 0x2715
105 125
106#define TEWS_PCI_VENDOR_ID 0x1498 126#define TEWS_PCI_VENDOR_ID 0x1498
@@ -108,6 +128,7 @@ struct plx_pci_card {
108 128
109static void plx_pci_reset_common(struct pci_dev *pdev); 129static void plx_pci_reset_common(struct pci_dev *pdev);
110static void plx_pci_reset_marathon(struct pci_dev *pdev); 130static void plx_pci_reset_marathon(struct pci_dev *pdev);
131static void plx9056_pci_reset_common(struct pci_dev *pdev);
111 132
112struct plx_pci_channel_map { 133struct plx_pci_channel_map {
113 u32 bar; 134 u32 bar;
@@ -148,6 +169,30 @@ static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
148 /* based on PLX9052 */ 169 /* based on PLX9052 */
149}; 170};
150 171
172static struct plx_pci_card_info plx_pci_card_info_esd200 __devinitdata = {
173 "esd CAN-PCI/CPCI/PCI104/200", 2,
174 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
175 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
176 &plx_pci_reset_common
177 /* based on PLX9030/9050 */
178};
179
180static struct plx_pci_card_info plx_pci_card_info_esd266 __devinitdata = {
181 "esd CAN-PCI/PMC/266", 2,
182 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
183 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
184 &plx9056_pci_reset_common
185 /* based on PLX9056 */
186};
187
188static struct plx_pci_card_info plx_pci_card_info_esd2000 __devinitdata = {
189 "esd CAN-PCIe/2000", 2,
190 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
191 {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
192 &plx9056_pci_reset_common
193 /* based on PEX8311 */
194};
195
151static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = { 196static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
152 "Marathon CAN-bus-PCI", 2, 197 "Marathon CAN-bus-PCI", 2,
153 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, 198 PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
@@ -180,6 +225,48 @@ static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
180 (kernel_ulong_t)&plx_pci_card_info_adlink_se 225 (kernel_ulong_t)&plx_pci_card_info_adlink_se
181 }, 226 },
182 { 227 {
228 /* esd CAN-PCI/200 */
229 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
230 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200,
231 0, 0,
232 (kernel_ulong_t)&plx_pci_card_info_esd200
233 },
234 {
235 /* esd CAN-CPCI/200 */
236 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
237 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200,
238 0, 0,
239 (kernel_ulong_t)&plx_pci_card_info_esd200
240 },
241 {
242 /* esd CAN-PCI104/200 */
243 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
244 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200,
245 0, 0,
246 (kernel_ulong_t)&plx_pci_card_info_esd200
247 },
248 {
249 /* esd CAN-PCI/266 */
250 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
251 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266,
252 0, 0,
253 (kernel_ulong_t)&plx_pci_card_info_esd266
254 },
255 {
256 /* esd CAN-PMC/266 */
257 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
258 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266,
259 0, 0,
260 (kernel_ulong_t)&plx_pci_card_info_esd266
261 },
262 {
263 /* esd CAN-PCIE/2000 */
264 PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
265 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000,
266 0, 0,
267 (kernel_ulong_t)&plx_pci_card_info_esd2000
268 },
269 {
183 /* Marathon CAN-bus-PCI card */ 270 /* Marathon CAN-bus-PCI card */
184 PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID, 271 PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
185 PCI_ANY_ID, PCI_ANY_ID, 272 PCI_ANY_ID, PCI_ANY_ID,
@@ -242,7 +329,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
242} 329}
243 330
244/* 331/*
245 * PLX90xx software reset 332 * PLX9030/50/52 software reset
246 * Also LRESET# asserts and brings to reset device on the Local Bus (if wired). 333 * Also LRESET# asserts and brings to reset device on the Local Bus (if wired).
247 * For most cards it's enough for reset the SJA1000 chips. 334 * For most cards it's enough for reset the SJA1000 chips.
248 */ 335 */
@@ -259,6 +346,38 @@ static void plx_pci_reset_common(struct pci_dev *pdev)
259 iowrite32(cntrl, card->conf_addr + PLX_CNTRL); 346 iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
260}; 347};
261 348
349/*
350 * PLX9056 software reset
351 * Assert LRESET# and reset device(s) on the Local Bus (if wired).
352 */
353static void plx9056_pci_reset_common(struct pci_dev *pdev)
354{
355 struct plx_pci_card *card = pci_get_drvdata(pdev);
356 u32 cntrl;
357
358 /* issue a local bus reset */
359 cntrl = ioread32(card->conf_addr + PLX9056_CNTRL);
360 cntrl |= PLX_PCI_RESET;
361 iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
362 udelay(100);
363 cntrl ^= PLX_PCI_RESET;
364 iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
365
366 /* reload local configuration from EEPROM */
367 cntrl |= PLX9056_PCI_RCR;
368 iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
369
370 /*
371 * There is no safe way to poll for the end
372 * of reconfiguration process. Waiting for 10ms
373 * is safe.
374 */
375 mdelay(10);
376
377 cntrl ^= PLX9056_PCI_RCR;
378 iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
379};
380
262/* Special reset function for Marathon card */ 381/* Special reset function for Marathon card */
263static void plx_pci_reset_marathon(struct pci_dev *pdev) 382static void plx_pci_reset_marathon(struct pci_dev *pdev)
264{ 383{
@@ -302,13 +421,16 @@ static void plx_pci_del_card(struct pci_dev *pdev)
302 free_sja1000dev(dev); 421 free_sja1000dev(dev);
303 } 422 }
304 423
305 plx_pci_reset_common(pdev); 424 card->reset_func(pdev);
306 425
307 /* 426 /*
308 * Disable interrupts from PCI-card (PLX90xx) and disable Local_1, 427 * Disable interrupts from PCI-card and disable local
309 * Local_2 interrupts 428 * interrupts
310 */ 429 */
311 iowrite32(0x0, card->conf_addr + PLX_INTCSR); 430 if (pdev->device != PCI_DEVICE_ID_PLX_9056)
431 iowrite32(0x0, card->conf_addr + PLX_INTCSR);
432 else
433 iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
312 434
313 if (card->conf_addr) 435 if (card->conf_addr)
314 pci_iounmap(pdev, card->conf_addr); 436 pci_iounmap(pdev, card->conf_addr);
@@ -367,6 +489,7 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
367 card->conf_addr = addr + ci->conf_map.offset; 489 card->conf_addr = addr + ci->conf_map.offset;
368 490
369 ci->reset_func(pdev); 491 ci->reset_func(pdev);
492 card->reset_func = ci->reset_func;
370 493
371 /* Detect available channels */ 494 /* Detect available channels */
372 for (i = 0; i < ci->channel_count; i++) { 495 for (i = 0; i < ci->channel_count; i++) {
@@ -438,10 +561,17 @@ static int __devinit plx_pci_add_card(struct pci_dev *pdev,
438 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1, 561 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
439 * Local_2 interrupts from the SJA1000 chips 562 * Local_2 interrupts from the SJA1000 chips
440 */ 563 */
441 val = ioread32(card->conf_addr + PLX_INTCSR); 564 if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
442 val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN; 565 val = ioread32(card->conf_addr + PLX_INTCSR);
443 iowrite32(val, card->conf_addr + PLX_INTCSR); 566 if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
444 567 val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
568 else
569 val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
570 iowrite32(val, card->conf_addr + PLX_INTCSR);
571 } else {
572 iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN,
573 card->conf_addr + PLX9056_INTCSR);
574 }
445 return 0; 575 return 0;
446 576
447failure_cleanup: 577failure_cleanup:
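
Two structural changes tie the esd support together: probe now copies the matched card's reset_func out of plx_pci_card_info into struct plx_pci_card, so plx_pci_del_card() calls the variant-specific reset instead of hardcoding plx_pci_reset_common(), and interrupt enable/disable branches on pdev->device because the PLX9056 uses PLX9056_INTCSR with different bits (the esd PLX9030/50 boards enable only LINT1, presumably because both SJA1000 channels are wired to that line). The stored-hook pattern in isolation, with hypothetical names:

struct variant_info {
	void (*reset)(struct pci_dev *pdev);	/* per-variant hook */
};

struct card_state {
	void (*reset)(struct pci_dev *pdev);	/* copied at probe time */
};

/* probe: run the variant reset once, then remember it for teardown,
 * so remove() no longer hardcodes one reset routine
 */
static void adopt_reset(struct card_state *card,
			const struct variant_info *vi,
			struct pci_dev *pdev)
{
	vi->reset(pdev);
	card->reset = vi->reset;
}
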
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 145b1a731a53..618c11222abc 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -60,7 +60,6 @@
60#include <linux/skbuff.h> 60#include <linux/skbuff.h>
61#include <linux/delay.h> 61#include <linux/delay.h>
62 62
63#include <linux/can.h>
64#include <linux/can/dev.h> 63#include <linux/can/dev.h>
65#include <linux/can/error.h> 64#include <linux/can/error.h>
66 65
diff --git a/drivers/net/can/sja1000/sja1000_isa.c b/drivers/net/can/sja1000/sja1000_isa.c
index a6a51f155962..496223e9e2fc 100644
--- a/drivers/net/can/sja1000/sja1000_isa.c
+++ b/drivers/net/can/sja1000/sja1000_isa.c
@@ -23,7 +23,6 @@
23#include <linux/delay.h> 23#include <linux/delay.h>
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/can.h>
27#include <linux/can/dev.h> 26#include <linux/can/dev.h>
28#include <linux/can/platform/sja1000.h> 27#include <linux/can/platform/sja1000.h>
29 28
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 9dd076a626a5..34e79efbd2fc 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -38,7 +38,6 @@
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/netdevice.h> 39#include <linux/netdevice.h>
40#include <linux/delay.h> 40#include <linux/delay.h>
41#include <linux/can.h>
42#include <linux/can/dev.h> 41#include <linux/can/dev.h>
43 42
44#include <linux/of_platform.h> 43#include <linux/of_platform.h>
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 628374c2a05f..b65cabb361ab 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -24,7 +24,6 @@
24#include <linux/pci.h> 24#include <linux/pci.h>
25#include <linux/platform_device.h> 25#include <linux/platform_device.h>
26#include <linux/irq.h> 26#include <linux/irq.h>
27#include <linux/can.h>
28#include <linux/can/dev.h> 27#include <linux/can/dev.h>
29#include <linux/can/platform/sja1000.h> 28#include <linux/can/platform/sja1000.h>
30#include <linux/io.h> 29#include <linux/io.h>
@@ -37,16 +36,36 @@ MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
37MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus"); 36MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
38MODULE_LICENSE("GPL v2"); 37MODULE_LICENSE("GPL v2");
39 38
40static u8 sp_read_reg(const struct sja1000_priv *priv, int reg) 39static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg)
41{ 40{
42 return ioread8(priv->reg_base + reg); 41 return ioread8(priv->reg_base + reg);
43} 42}
44 43
45static void sp_write_reg(const struct sja1000_priv *priv, int reg, u8 val) 44static void sp_write_reg8(const struct sja1000_priv *priv, int reg, u8 val)
46{ 45{
47 iowrite8(val, priv->reg_base + reg); 46 iowrite8(val, priv->reg_base + reg);
48} 47}
49 48
49static u8 sp_read_reg16(const struct sja1000_priv *priv, int reg)
50{
51 return ioread8(priv->reg_base + reg * 2);
52}
53
54static void sp_write_reg16(const struct sja1000_priv *priv, int reg, u8 val)
55{
56 iowrite8(val, priv->reg_base + reg * 2);
57}
58
59static u8 sp_read_reg32(const struct sja1000_priv *priv, int reg)
60{
61 return ioread8(priv->reg_base + reg * 4);
62}
63
64static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
65{
66 iowrite8(val, priv->reg_base + reg * 4);
67}
68
50static int sp_probe(struct platform_device *pdev) 69static int sp_probe(struct platform_device *pdev)
51{ 70{
52 int err; 71 int err;
@@ -90,14 +109,28 @@ static int sp_probe(struct platform_device *pdev)
90 priv = netdev_priv(dev); 109 priv = netdev_priv(dev);
91 110
92 dev->irq = res_irq->start; 111 dev->irq = res_irq->start;
93 priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; 112 priv->irq_flags = res_irq->flags & (IRQF_TRIGGER_MASK | IRQF_SHARED);
94 priv->reg_base = addr; 113 priv->reg_base = addr;
95 priv->read_reg = sp_read_reg;
96 priv->write_reg = sp_write_reg;
97 priv->can.clock.freq = pdata->clock; 114 priv->can.clock.freq = pdata->clock;
98 priv->ocr = pdata->ocr; 115 priv->ocr = pdata->ocr;
99 priv->cdr = pdata->cdr; 116 priv->cdr = pdata->cdr;
100 117
118 switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
119 case IORESOURCE_MEM_32BIT:
120 priv->read_reg = sp_read_reg32;
121 priv->write_reg = sp_write_reg32;
122 break;
123 case IORESOURCE_MEM_16BIT:
124 priv->read_reg = sp_read_reg16;
125 priv->write_reg = sp_write_reg16;
126 break;
127 case IORESOURCE_MEM_8BIT:
128 default:
129 priv->read_reg = sp_read_reg8;
130 priv->write_reg = sp_write_reg8;
131 break;
132 }
133
101 dev_set_drvdata(&pdev->dev, dev); 134 dev_set_drvdata(&pdev->dev, dev);
102 SET_NETDEV_DEV(dev, &pdev->dev); 135 SET_NETDEV_DEV(dev, &pdev->dev);
103 136
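
sp_probe() now chooses 8-, 16- or 32-bit register accessors from the IORESOURCE_MEM_TYPE_MASK bits of the memory resource: on a 16-bit (or 32-bit) bus each 8-bit SJA1000 register sits on a 2-byte (or 4-byte) stride, hence the reg * 2 and reg * 4 addressing, and the widened IRQ-flags mask lets boards pass shared-interrupt flags through as well. A board-file sketch with made-up addresses, showing how a platform would request the 32-bit accessors:

#include <linux/ioport.h>
#include <linux/platform_device.h>

/* Hypothetical board: SJA1000 behind a 32-bit bus, so every register
 * occupies a 4-byte slot; IORESOURCE_MEM_32BIT makes sp_probe() pick
 * sp_read_reg32()/sp_write_reg32().
 */
static struct resource board_sja1000_res[] = {
	{
		.start = 0x80000000,		/* made-up base */
		.end   = 0x80000000 + 0x80 * 4 - 1,
		.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	},
	{
		.start = 42,			/* made-up IRQ */
		.end   = 42,
		.flags = IORESOURCE_IRQ,
	},
};
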
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 0c3d2ba0d178..4d07f1ee7168 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -47,7 +47,6 @@
47#include <linux/platform_device.h> 47#include <linux/platform_device.h>
48#include <linux/clk.h> 48#include <linux/clk.h>
49 49
50#include <linux/can.h>
51#include <linux/can/dev.h> 50#include <linux/can/dev.h>
52#include <linux/can/error.h> 51#include <linux/can/error.h>
53#include <linux/can/platform/ti_hecc.h> 52#include <linux/can/platform/ti_hecc.h>
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index 9bd155e4111c..bd857a20a755 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2957,20 +2957,20 @@ static void cas_process_mc_list(struct cas *cp)
2957{ 2957{
2958 u16 hash_table[16]; 2958 u16 hash_table[16];
2959 u32 crc; 2959 u32 crc;
2960 struct dev_mc_list *dmi; 2960 struct netdev_hw_addr *ha;
2961 int i = 1; 2961 int i = 1;
2962 2962
2963 memset(hash_table, 0, sizeof(hash_table)); 2963 memset(hash_table, 0, sizeof(hash_table));
2964 netdev_for_each_mc_addr(dmi, cp->dev) { 2964 netdev_for_each_mc_addr(ha, cp->dev) {
2965 if (i <= CAS_MC_EXACT_MATCH_SIZE) { 2965 if (i <= CAS_MC_EXACT_MATCH_SIZE) {
2966 /* use the alternate mac address registers for the 2966 /* use the alternate mac address registers for the
2967 * first 15 multicast addresses 2967 * first 15 multicast addresses
2968 */ 2968 */
2969 writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5], 2969 writel((ha->addr[4] << 8) | ha->addr[5],
2970 cp->regs + REG_MAC_ADDRN(i*3 + 0)); 2970 cp->regs + REG_MAC_ADDRN(i*3 + 0));
2971 writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3], 2971 writel((ha->addr[2] << 8) | ha->addr[3],
2972 cp->regs + REG_MAC_ADDRN(i*3 + 1)); 2972 cp->regs + REG_MAC_ADDRN(i*3 + 1));
2973 writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1], 2973 writel((ha->addr[0] << 8) | ha->addr[1],
2974 cp->regs + REG_MAC_ADDRN(i*3 + 2)); 2974 cp->regs + REG_MAC_ADDRN(i*3 + 2));
2975 i++; 2975 i++;
2976 } 2976 }
@@ -2978,7 +2978,7 @@ static void cas_process_mc_list(struct cas *cp)
2978 /* use hw hash table for the next series of 2978 /* use hw hash table for the next series of
2979 * multicast addresses 2979 * multicast addresses
2980 */ 2980 */
2981 crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr); 2981 crc = ether_crc_le(ETH_ALEN, ha->addr);
2982 crc >>= 24; 2982 crc >>= 24;
2983 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); 2983 hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
2984 } 2984 }
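
The cassini hunks are the first of many identical conversions in this section: the old struct dev_mc_list, with its dmi_addr field, is replaced by the unified struct netdev_hw_addr, and netdev_for_each_mc_addr() now yields entries whose address lives in ha->addr. The shape of every such conversion, reduced to its core (use_addr() stands in for whatever the driver does with each address):

struct net_device *dev;		/* the device being configured */

/* before: multicast-only list node type exposing dmi_addr */
struct dev_mc_list *dmi;
netdev_for_each_mc_addr(dmi, dev)
	use_addr(dmi->dmi_addr);

/* after: one hardware-address type for unicast and multicast lists */
struct netdev_hw_addr *ha;
netdev_for_each_mc_addr(ha, dev)
	use_addr(ha->addr);
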
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 9e631b9d3948..7dbb16d36fff 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -377,12 +377,13 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
377 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN; 377 rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
378 } else if (t1_rx_mode_mc_cnt(rm)) { 378 } else if (t1_rx_mode_mc_cnt(rm)) {
379 /* Accept one or more multicast(s). */ 379 /* Accept one or more multicast(s). */
380 struct dev_mc_list *dmi; 380 struct netdev_hw_addr *ha;
381 int bit; 381 int bit;
382 u16 mc_filter[4] = { 0, }; 382 u16 mc_filter[4] = { 0, };
383 383
384 netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) { 384 netdev_for_each_mc_addr(ha, t1_get_netdev(rm)) {
385 bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */ 385 /* bit[23:28] */
386 bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
386 mc_filter[bit >> 4] |= 1 << (bit & 0xf); 387 mc_filter[bit >> 4] |= 1 << (bit & 0xf);
387 } 388 }
388 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]); 389 pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
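
Beyond the type rename, the pm3393 loop is a compact example of a CRC-based multicast hash filter: ether_crc() hashes each address, six bits of the CRC select one of 64 bucket bits, and the 64-bit filter is kept as four 16-bit register words. Condensed into a standalone helper (a sketch of the same arithmetic, not a function from the patch):

/* Build a 64-bit multicast hash as four 16-bit words, as pm3393 does:
 * CRC bits [28:23] pick one of 64 buckets; bit >> 4 selects the word,
 * bit & 0xf the position inside it.
 */
static void build_mc_hash64(struct net_device *dev, u16 mc_filter[4])
{
	struct netdev_hw_addr *ha;
	int bit;

	memset(mc_filter, 0, 4 * sizeof(u16));
	netdev_for_each_mc_addr(ha, dev) {
		bit = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x3f;
		mc_filter[bit >> 4] |= 1 << (bit & 0xf);
	}
}
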
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index df3a1410696e..f01cfdb995de 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -162,14 +162,14 @@ struct respQ_e {
162 */ 162 */
163struct cmdQ_ce { 163struct cmdQ_ce {
164 struct sk_buff *skb; 164 struct sk_buff *skb;
165 DECLARE_PCI_UNMAP_ADDR(dma_addr); 165 DEFINE_DMA_UNMAP_ADDR(dma_addr);
166 DECLARE_PCI_UNMAP_LEN(dma_len); 166 DEFINE_DMA_UNMAP_LEN(dma_len);
167}; 167};
168 168
169struct freelQ_ce { 169struct freelQ_ce {
170 struct sk_buff *skb; 170 struct sk_buff *skb;
171 DECLARE_PCI_UNMAP_ADDR(dma_addr); 171 DEFINE_DMA_UNMAP_ADDR(dma_addr);
172 DECLARE_PCI_UNMAP_LEN(dma_len); 172 DEFINE_DMA_UNMAP_LEN(dma_len);
173}; 173};
174 174
175/* 175/*
@@ -460,7 +460,7 @@ static struct sk_buff *sched_skb(struct sge *sge, struct sk_buff *skb,
460 460
461again: 461again:
462 for (i = 0; i < MAX_NPORTS; i++) { 462 for (i = 0; i < MAX_NPORTS; i++) {
463 s->port = ++s->port & (MAX_NPORTS - 1); 463 s->port = (s->port + 1) & (MAX_NPORTS - 1);
464 skbq = &s->p[s->port].skbq; 464 skbq = &s->p[s->port].skbq;
465 465
466 skb = skb_peek(skbq); 466 skb = skb_peek(skbq);
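
This hunk is a correctness fix rather than an API rename: s->port = ++s->port & (MAX_NPORTS - 1) modifies s->port twice with no intervening sequence point, which is undefined behavior in C (gcc 4.x warns about exactly this construct); computing the incremented value first is the well-defined spelling. A standalone illustration:

#include <stdio.h>

int main(void)
{
	unsigned int p = 3;

	/* p = ++p & 3;     two unsequenced writes to p: undefined */
	p = (p + 1) & 3;   /* well-defined round-robin step */
	printf("%u\n", p); /* prints 0 */
	return 0;
}
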
@@ -518,8 +518,8 @@ static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
518 while (q->credits--) { 518 while (q->credits--) {
519 struct freelQ_ce *ce = &q->centries[cidx]; 519 struct freelQ_ce *ce = &q->centries[cidx];
520 520
521 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 521 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
522 pci_unmap_len(ce, dma_len), 522 dma_unmap_len(ce, dma_len),
523 PCI_DMA_FROMDEVICE); 523 PCI_DMA_FROMDEVICE);
524 dev_kfree_skb(ce->skb); 524 dev_kfree_skb(ce->skb);
525 ce->skb = NULL; 525 ce->skb = NULL;
@@ -633,9 +633,9 @@ static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
633 q->in_use -= n; 633 q->in_use -= n;
634 ce = &q->centries[cidx]; 634 ce = &q->centries[cidx];
635 while (n--) { 635 while (n--) {
636 if (likely(pci_unmap_len(ce, dma_len))) { 636 if (likely(dma_unmap_len(ce, dma_len))) {
637 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 637 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
638 pci_unmap_len(ce, dma_len), 638 dma_unmap_len(ce, dma_len),
639 PCI_DMA_TODEVICE); 639 PCI_DMA_TODEVICE);
640 if (q->sop) 640 if (q->sop)
641 q->sop = 0; 641 q->sop = 0;
@@ -851,8 +851,8 @@ static void refill_free_list(struct sge *sge, struct freelQ *q)
851 skb_reserve(skb, sge->rx_pkt_pad); 851 skb_reserve(skb, sge->rx_pkt_pad);
852 852
853 ce->skb = skb; 853 ce->skb = skb;
854 pci_unmap_addr_set(ce, dma_addr, mapping); 854 dma_unmap_addr_set(ce, dma_addr, mapping);
855 pci_unmap_len_set(ce, dma_len, dma_len); 855 dma_unmap_len_set(ce, dma_len, dma_len);
856 e->addr_lo = (u32)mapping; 856 e->addr_lo = (u32)mapping;
857 e->addr_hi = (u64)mapping >> 32; 857 e->addr_hi = (u64)mapping >> 32;
858 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit); 858 e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
@@ -1059,13 +1059,13 @@ static inline struct sk_buff *get_packet(struct pci_dev *pdev,
1059 skb_reserve(skb, 2); /* align IP header */ 1059 skb_reserve(skb, 2); /* align IP header */
1060 skb_put(skb, len); 1060 skb_put(skb, len);
1061 pci_dma_sync_single_for_cpu(pdev, 1061 pci_dma_sync_single_for_cpu(pdev,
1062 pci_unmap_addr(ce, dma_addr), 1062 dma_unmap_addr(ce, dma_addr),
1063 pci_unmap_len(ce, dma_len), 1063 dma_unmap_len(ce, dma_len),
1064 PCI_DMA_FROMDEVICE); 1064 PCI_DMA_FROMDEVICE);
1065 skb_copy_from_linear_data(ce->skb, skb->data, len); 1065 skb_copy_from_linear_data(ce->skb, skb->data, len);
1066 pci_dma_sync_single_for_device(pdev, 1066 pci_dma_sync_single_for_device(pdev,
1067 pci_unmap_addr(ce, dma_addr), 1067 dma_unmap_addr(ce, dma_addr),
1068 pci_unmap_len(ce, dma_len), 1068 dma_unmap_len(ce, dma_len),
1069 PCI_DMA_FROMDEVICE); 1069 PCI_DMA_FROMDEVICE);
1070 recycle_fl_buf(fl, fl->cidx); 1070 recycle_fl_buf(fl, fl->cidx);
1071 return skb; 1071 return skb;
@@ -1077,8 +1077,8 @@ use_orig_buf:
1077 return NULL; 1077 return NULL;
1078 } 1078 }
1079 1079
1080 pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr), 1080 pci_unmap_single(pdev, dma_unmap_addr(ce, dma_addr),
1081 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1081 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1082 skb = ce->skb; 1082 skb = ce->skb;
1083 prefetch(skb->data); 1083 prefetch(skb->data);
1084 1084
@@ -1100,8 +1100,8 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
1100 struct freelQ_ce *ce = &fl->centries[fl->cidx]; 1100 struct freelQ_ce *ce = &fl->centries[fl->cidx];
1101 struct sk_buff *skb = ce->skb; 1101 struct sk_buff *skb = ce->skb;
1102 1102
1103 pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr), 1103 pci_dma_sync_single_for_cpu(adapter->pdev, dma_unmap_addr(ce, dma_addr),
1104 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE); 1104 dma_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
1105 pr_err("%s: unexpected offload packet, cmd %u\n", 1105 pr_err("%s: unexpected offload packet, cmd %u\n",
1106 adapter->name, *skb->data); 1106 adapter->name, *skb->data);
1107 recycle_fl_buf(fl, fl->cidx); 1107 recycle_fl_buf(fl, fl->cidx);
@@ -1123,7 +1123,7 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
1123 1123
1124 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) { 1124 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
1125 unsigned int nfrags = skb_shinfo(skb)->nr_frags; 1125 unsigned int nfrags = skb_shinfo(skb)->nr_frags;
1126 unsigned int i, len = skb->len - skb->data_len; 1126 unsigned int i, len = skb_headlen(skb);
1127 while (len > SGE_TX_DESC_MAX_PLEN) { 1127 while (len > SGE_TX_DESC_MAX_PLEN) {
1128 count++; 1128 count++;
1129 len -= SGE_TX_DESC_MAX_PLEN; 1129 len -= SGE_TX_DESC_MAX_PLEN;
@@ -1182,7 +1182,7 @@ static inline unsigned int write_large_page_tx_descs(unsigned int pidx,
1182 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN, 1182 write_tx_desc(e1, *desc_mapping, SGE_TX_DESC_MAX_PLEN,
1183 *gen, nfrags == 0 && *desc_len == 0); 1183 *gen, nfrags == 0 && *desc_len == 0);
1184 ce1->skb = NULL; 1184 ce1->skb = NULL;
1185 pci_unmap_len_set(ce1, dma_len, 0); 1185 dma_unmap_len_set(ce1, dma_len, 0);
1186 *desc_mapping += SGE_TX_DESC_MAX_PLEN; 1186 *desc_mapping += SGE_TX_DESC_MAX_PLEN;
1187 if (*desc_len) { 1187 if (*desc_len) {
1188 ce1++; 1188 ce1++;
@@ -1219,10 +1219,10 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1219 ce = &q->centries[pidx]; 1219 ce = &q->centries[pidx];
1220 1220
1221 mapping = pci_map_single(adapter->pdev, skb->data, 1221 mapping = pci_map_single(adapter->pdev, skb->data,
1222 skb->len - skb->data_len, PCI_DMA_TODEVICE); 1222 skb_headlen(skb), PCI_DMA_TODEVICE);
1223 1223
1224 desc_mapping = mapping; 1224 desc_mapping = mapping;
1225 desc_len = skb->len - skb->data_len; 1225 desc_len = skb_headlen(skb);
1226 1226
1227 flags = F_CMD_DATAVALID | F_CMD_SOP | 1227 flags = F_CMD_DATAVALID | F_CMD_SOP |
1228 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) | 1228 V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
@@ -1233,7 +1233,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1233 e->addr_hi = (u64)desc_mapping >> 32; 1233 e->addr_hi = (u64)desc_mapping >> 32;
1234 e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen); 1234 e->len_gen = V_CMD_LEN(first_desc_len) | V_CMD_GEN1(gen);
1235 ce->skb = NULL; 1235 ce->skb = NULL;
1236 pci_unmap_len_set(ce, dma_len, 0); 1236 dma_unmap_len_set(ce, dma_len, 0);
1237 1237
1238 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN && 1238 if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN &&
1239 desc_len > SGE_TX_DESC_MAX_PLEN) { 1239 desc_len > SGE_TX_DESC_MAX_PLEN) {
@@ -1257,8 +1257,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1257 } 1257 }
1258 1258
1259 ce->skb = NULL; 1259 ce->skb = NULL;
1260 pci_unmap_addr_set(ce, dma_addr, mapping); 1260 dma_unmap_addr_set(ce, dma_addr, mapping);
1261 pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len); 1261 dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
1262 1262
1263 for (i = 0; nfrags--; i++) { 1263 for (i = 0; nfrags--; i++) {
1264 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; 1264 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -1284,8 +1284,8 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
1284 write_tx_desc(e1, desc_mapping, desc_len, gen, 1284 write_tx_desc(e1, desc_mapping, desc_len, gen,
1285 nfrags == 0); 1285 nfrags == 0);
1286 ce->skb = NULL; 1286 ce->skb = NULL;
1287 pci_unmap_addr_set(ce, dma_addr, mapping); 1287 dma_unmap_addr_set(ce, dma_addr, mapping);
1288 pci_unmap_len_set(ce, dma_len, frag->size); 1288 dma_unmap_len_set(ce, dma_len, frag->size);
1289 } 1289 }
1290 ce->skb = skb; 1290 ce->skb = skb;
1291 wmb(); 1291 wmb();
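
The bulk of the sge.c diff is mechanical: descriptor bookkeeping moves from the PCI-specific unmap macros (DECLARE_PCI_UNMAP_ADDR/LEN, pci_unmap_addr/len and their _set variants) to the generic DMA-API spellings, and the open-coded skb->len - skb->data_len becomes skb_headlen(skb). The macro pairing, reduced to a sketch (the sw_desc name is illustrative):

struct sw_desc {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(dma_addr); /* both expand to nothing on */
	DEFINE_DMA_UNMAP_LEN(dma_len);   /* archs needing no unmap state */
};

/* store at map time ... */
dma_unmap_addr_set(d, dma_addr, mapping);
dma_unmap_len_set(d, dma_len, len);

/* ... read back at unmap time */
pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
		 dma_unmap_len(d, dma_len), PCI_DMA_TODEVICE);
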
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 60777fd90b33..bdfff784645c 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -328,7 +328,7 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map)
328 328
329static void cpmac_set_multicast_list(struct net_device *dev) 329static void cpmac_set_multicast_list(struct net_device *dev)
330{ 330{
331 struct dev_mc_list *iter; 331 struct netdev_hw_addr *ha;
332 u8 tmp; 332 u8 tmp;
333 u32 mbp, bit, hash[2] = { 0, }; 333 u32 mbp, bit, hash[2] = { 0, };
334 struct cpmac_priv *priv = netdev_priv(dev); 334 struct cpmac_priv *priv = netdev_priv(dev);
@@ -348,19 +348,19 @@ static void cpmac_set_multicast_list(struct net_device *dev)
348 * cpmac uses some strange mac address hashing 348 * cpmac uses some strange mac address hashing
349 * (not crc32) 349 * (not crc32)
350 */ 350 */
351 netdev_for_each_mc_addr(iter, dev) { 351 netdev_for_each_mc_addr(ha, dev) {
352 bit = 0; 352 bit = 0;
353 tmp = iter->dmi_addr[0]; 353 tmp = ha->addr[0];
354 bit ^= (tmp >> 2) ^ (tmp << 4); 354 bit ^= (tmp >> 2) ^ (tmp << 4);
355 tmp = iter->dmi_addr[1]; 355 tmp = ha->addr[1];
356 bit ^= (tmp >> 4) ^ (tmp << 2); 356 bit ^= (tmp >> 4) ^ (tmp << 2);
357 tmp = iter->dmi_addr[2]; 357 tmp = ha->addr[2];
358 bit ^= (tmp >> 6) ^ tmp; 358 bit ^= (tmp >> 6) ^ tmp;
359 tmp = iter->dmi_addr[3]; 359 tmp = ha->addr[3];
360 bit ^= (tmp >> 2) ^ (tmp << 4); 360 bit ^= (tmp >> 2) ^ (tmp << 4);
361 tmp = iter->dmi_addr[4]; 361 tmp = ha->addr[4];
362 bit ^= (tmp >> 4) ^ (tmp << 2); 362 bit ^= (tmp >> 4) ^ (tmp << 2);
363 tmp = iter->dmi_addr[5]; 363 tmp = ha->addr[5];
364 bit ^= (tmp >> 6) ^ tmp; 364 bit ^= (tmp >> 6) ^ tmp;
365 bit &= 0x3f; 365 bit &= 0x3f;
366 hash[bit / 32] |= 1 << (bit % 32); 366 hash[bit / 32] |= 1 << (bit % 32);
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 61a33914e96f..f49ad8ed9b07 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1595,16 +1595,16 @@ set_multicast_list(struct net_device *dev)
1595 } else { 1595 } else {
1596 /* MC mode, receive normal and MC packets */ 1596 /* MC mode, receive normal and MC packets */
1597 char hash_ix; 1597 char hash_ix;
1598 struct dev_mc_list *dmi; 1598 struct netdev_hw_addr *ha;
1599 char *baddr; 1599 char *baddr;
1600 1600
1601 lo_bits = 0x00000000ul; 1601 lo_bits = 0x00000000ul;
1602 hi_bits = 0x00000000ul; 1602 hi_bits = 0x00000000ul;
1603 netdev_for_each_mc_addr(dmi, dev) { 1603 netdev_for_each_mc_addr(ha, dev) {
1604 /* Calculate the hash index for the GA registers */ 1604 /* Calculate the hash index for the GA registers */
1605 1605
1606 hash_ix = 0; 1606 hash_ix = 0;
1607 baddr = dmi->dmi_addr; 1607 baddr = ha->addr;
1608 hash_ix ^= (*baddr) & 0x3f; 1608 hash_ix ^= (*baddr) & 0x3f;
1609 hash_ix ^= ((*baddr) >> 6) & 0x03; 1609 hash_ix ^= ((*baddr) >> 6) & 0x03;
1610 ++baddr; 1610 ++baddr;
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 07d7e7fab3f5..5962b911b5bd 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -118,7 +118,7 @@ struct rx_sw_desc { /* SW state per Rx descriptor */
118 struct sk_buff *skb; 118 struct sk_buff *skb;
119 struct fl_pg_chunk pg_chunk; 119 struct fl_pg_chunk pg_chunk;
120 }; 120 };
121 DECLARE_PCI_UNMAP_ADDR(dma_addr); 121 DEFINE_DMA_UNMAP_ADDR(dma_addr);
122}; 122};
123 123
124struct rsp_desc { /* response queue descriptor */ 124struct rsp_desc { /* response queue descriptor */
@@ -208,7 +208,7 @@ static inline int need_skb_unmap(void)
208 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything. 208 * unmapping by checking if DECLARE_PCI_UNMAP_ADDR defines anything.
209 */ 209 */
210 struct dummy { 210 struct dummy {
211 DECLARE_PCI_UNMAP_ADDR(addr); 211 DEFINE_DMA_UNMAP_ADDR(addr);
212 }; 212 };
213 213
214 return sizeof(struct dummy) != 0; 214 return sizeof(struct dummy) != 0;
@@ -363,7 +363,7 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
363 put_page(d->pg_chunk.page); 363 put_page(d->pg_chunk.page);
364 d->pg_chunk.page = NULL; 364 d->pg_chunk.page = NULL;
365 } else { 365 } else {
366 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr), 366 pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr),
367 q->buf_size, PCI_DMA_FROMDEVICE); 367 q->buf_size, PCI_DMA_FROMDEVICE);
368 kfree_skb(d->skb); 368 kfree_skb(d->skb);
369 d->skb = NULL; 369 d->skb = NULL;
@@ -419,7 +419,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
419 if (unlikely(pci_dma_mapping_error(pdev, mapping))) 419 if (unlikely(pci_dma_mapping_error(pdev, mapping)))
420 return -ENOMEM; 420 return -ENOMEM;
421 421
422 pci_unmap_addr_set(sd, dma_addr, mapping); 422 dma_unmap_addr_set(sd, dma_addr, mapping);
423 423
424 d->addr_lo = cpu_to_be32(mapping); 424 d->addr_lo = cpu_to_be32(mapping);
425 d->addr_hi = cpu_to_be32((u64) mapping >> 32); 425 d->addr_hi = cpu_to_be32((u64) mapping >> 32);
@@ -515,7 +515,7 @@ nomem: q->alloc_failed++;
515 break; 515 break;
516 } 516 }
517 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset; 517 mapping = sd->pg_chunk.mapping + sd->pg_chunk.offset;
518 pci_unmap_addr_set(sd, dma_addr, mapping); 518 dma_unmap_addr_set(sd, dma_addr, mapping);
519 519
520 add_one_rx_chunk(mapping, d, q->gen); 520 add_one_rx_chunk(mapping, d, q->gen);
521 pci_dma_sync_single_for_device(adap->pdev, mapping, 521 pci_dma_sync_single_for_device(adap->pdev, mapping,
@@ -791,11 +791,11 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
791 if (likely(skb != NULL)) { 791 if (likely(skb != NULL)) {
792 __skb_put(skb, len); 792 __skb_put(skb, len);
793 pci_dma_sync_single_for_cpu(adap->pdev, 793 pci_dma_sync_single_for_cpu(adap->pdev,
794 pci_unmap_addr(sd, dma_addr), len, 794 dma_unmap_addr(sd, dma_addr), len,
795 PCI_DMA_FROMDEVICE); 795 PCI_DMA_FROMDEVICE);
796 memcpy(skb->data, sd->skb->data, len); 796 memcpy(skb->data, sd->skb->data, len);
797 pci_dma_sync_single_for_device(adap->pdev, 797 pci_dma_sync_single_for_device(adap->pdev,
798 pci_unmap_addr(sd, dma_addr), len, 798 dma_unmap_addr(sd, dma_addr), len,
799 PCI_DMA_FROMDEVICE); 799 PCI_DMA_FROMDEVICE);
800 } else if (!drop_thres) 800 } else if (!drop_thres)
801 goto use_orig_buf; 801 goto use_orig_buf;
@@ -810,7 +810,7 @@ recycle:
810 goto recycle; 810 goto recycle;
811 811
812use_orig_buf: 812use_orig_buf:
813 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr), 813 pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr),
814 fl->buf_size, PCI_DMA_FROMDEVICE); 814 fl->buf_size, PCI_DMA_FROMDEVICE);
815 skb = sd->skb; 815 skb = sd->skb;
816 skb_put(skb, len); 816 skb_put(skb, len);
@@ -843,7 +843,7 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
843 struct sk_buff *newskb, *skb; 843 struct sk_buff *newskb, *skb;
844 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 844 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
845 845
846 dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr); 846 dma_addr_t dma_addr = dma_unmap_addr(sd, dma_addr);
847 847
848 newskb = skb = q->pg_skb; 848 newskb = skb = q->pg_skb;
849 if (!skb && (len <= SGE_RX_COPY_THRES)) { 849 if (!skb && (len <= SGE_RX_COPY_THRES)) {
@@ -2097,7 +2097,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2097 fl->credits--; 2097 fl->credits--;
2098 2098
2099 pci_dma_sync_single_for_cpu(adap->pdev, 2099 pci_dma_sync_single_for_cpu(adap->pdev,
2100 pci_unmap_addr(sd, dma_addr), 2100 dma_unmap_addr(sd, dma_addr),
2101 fl->buf_size - SGE_PG_RSVD, 2101 fl->buf_size - SGE_PG_RSVD,
2102 PCI_DMA_FROMDEVICE); 2102 PCI_DMA_FROMDEVICE);
2103 2103
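
One subtlety worth flagging: need_skb_unmap(), touched above, detects at compile time whether the architecture keeps DMA unmap state at all. When DEFINE_DMA_UNMAP_ADDR expands to nothing, a struct whose only member is that macro has size zero under gcc, so the sizeof test folds to a constant (the in-tree comment above it still naming DECLARE_PCI_UNMAP_ADDR is simply left stale by the patch). An annotated restatement of the idiom:

static inline int need_skb_unmap(void)
{
	/* Zero-sized structs are a GNU C extension the kernel assumes:
	 * when the arch stores no unmap state the member vanishes,
	 * sizeof(struct dummy) == 0, and this whole function becomes a
	 * compile-time 0, letting the unmap path be optimized away.
	 */
	struct dummy {
		DEFINE_DMA_UNMAP_ADDR(addr);
	};

	return sizeof(struct dummy) != 0;
}
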
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index c142a2132e9f..3af19a550372 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -311,16 +311,16 @@ int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
311 if (dev->flags & IFF_ALLMULTI) 311 if (dev->flags & IFF_ALLMULTI)
312 hash_lo = hash_hi = 0xffffffff; 312 hash_lo = hash_hi = 0xffffffff;
313 else { 313 else {
314 struct dev_mc_list *dmi; 314 struct netdev_hw_addr *ha;
315 int exact_addr_idx = mac->nucast; 315 int exact_addr_idx = mac->nucast;
316 316
317 hash_lo = hash_hi = 0; 317 hash_lo = hash_hi = 0;
318 netdev_for_each_mc_addr(dmi, dev) 318 netdev_for_each_mc_addr(ha, dev)
319 if (exact_addr_idx < EXACT_ADDR_FILTERS) 319 if (exact_addr_idx < EXACT_ADDR_FILTERS)
320 set_addr_filter(mac, exact_addr_idx++, 320 set_addr_filter(mac, exact_addr_idx++,
321 dmi->dmi_addr); 321 ha->addr);
322 else { 322 else {
323 int hash = hash_hw_addr(dmi->dmi_addr); 323 int hash = hash_hw_addr(ha->addr);
324 324
325 if (hash < 32) 325 if (hash < 32)
326 hash_lo |= (1 << hash); 326 hash_lo |= (1 << hash);
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index a7e30a23d322..5f582dba928f 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -240,9 +240,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
240 u16 filt_idx[7]; 240 u16 filt_idx[7];
241 const u8 *addr[7]; 241 const u8 *addr[7];
242 int ret, naddr = 0; 242 int ret, naddr = 0;
243 const struct dev_addr_list *d;
244 const struct netdev_hw_addr *ha; 243 const struct netdev_hw_addr *ha;
245 int uc_cnt = netdev_uc_count(dev); 244 int uc_cnt = netdev_uc_count(dev);
245 int mc_cnt = netdev_mc_count(dev);
246 const struct port_info *pi = netdev_priv(dev); 246 const struct port_info *pi = netdev_priv(dev);
247 247
248 /* first do the secondary unicast addresses */ 248 /* first do the secondary unicast addresses */
@@ -260,9 +260,9 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
260 } 260 }
261 261
262 /* next set up the multicast addresses */ 262 /* next set up the multicast addresses */
263 netdev_for_each_mc_addr(d, dev) { 263 netdev_for_each_mc_addr(ha, dev) {
264 addr[naddr++] = d->dmi_addr; 264 addr[naddr++] = ha->addr;
265 if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) { 265 if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
266 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free, 266 ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
267 naddr, addr, filt_idx, &mhash, sleep); 267 naddr, addr, filt_idx, &mhash, sleep);
268 if (ret < 0) 268 if (ret < 0)
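
cxgb4 needs more than a rename: the old loop used d->next == NULL to spot the final multicast entry and flush its partially filled addr[] batch, but netdev_hw_addr entries sit on a list_head, with no NULL-terminated next pointer to test. The fix counts down netdev_mc_count() instead. The batching skeleton in isolation (flush() is a hypothetical batch consumer):

const struct netdev_hw_addr *ha;
const u8 *addr[7];
int naddr = 0;
int mc_cnt = netdev_mc_count(dev);	/* entries still to visit */

netdev_for_each_mc_addr(ha, dev) {
	addr[naddr++] = ha->addr;
	if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
		flush(addr, naddr);	/* hypothetical */
		naddr = 0;
	}
}
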
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 2b8edd2efbf6..1f9df5c6a75a 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -952,13 +952,14 @@ static void emac_dev_mcast_set(struct net_device *ndev)
952 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL); 952 emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
953 } 953 }
954 if (!netdev_mc_empty(ndev)) { 954 if (!netdev_mc_empty(ndev)) {
955 struct dev_mc_list *mc_ptr; 955 struct netdev_hw_addr *ha;
956
956 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST); 957 mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
957 emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL); 958 emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
958 /* program multicast address list into EMAC hardware */ 959 /* program multicast address list into EMAC hardware */
959 netdev_for_each_mc_addr(mc_ptr, ndev) { 960 netdev_for_each_mc_addr(ha, ndev) {
960 emac_add_mcast(priv, EMAC_MULTICAST_ADD, 961 emac_add_mcast(priv, EMAC_MULTICAST_ADD,
961 (u8 *) mc_ptr->dmi_addr); 962 (u8 *) ha->addr);
962 } 963 }
963 } else { 964 } else {
964 mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST); 965 mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index 8cf3cc6f20e2..fb3f0984c289 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -940,7 +940,7 @@ static void lance_load_multicast(struct net_device *dev)
940{ 940{
941 struct lance_private *lp = netdev_priv(dev); 941 struct lance_private *lp = netdev_priv(dev);
942 volatile u16 *ib = (volatile u16 *)dev->mem_start; 942 volatile u16 *ib = (volatile u16 *)dev->mem_start;
943 struct dev_mc_list *dmi; 943 struct netdev_hw_addr *ha;
944 char *addrs; 944 char *addrs;
945 u32 crc; 945 u32 crc;
946 946
@@ -959,8 +959,8 @@ static void lance_load_multicast(struct net_device *dev)
959 *lib_ptr(ib, filter[3], lp->type) = 0; 959 *lib_ptr(ib, filter[3], lp->type) = 0;
960 960
961 /* Add addresses */ 961 /* Add addresses */
962 netdev_for_each_mc_addr(dmi, dev) { 962 netdev_for_each_mc_addr(ha, dev) {
963 addrs = dmi->dmi_addr; 963 addrs = ha->addr;
964 964
965 /* multicast address? */ 965 /* multicast address? */
966 if (!(*addrs & 1)) 966 if (!(*addrs & 1))
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index ed53a8d45f4e..e5667c55844e 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -2195,7 +2195,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
2195{ 2195{
2196 DFX_board_t *bp = netdev_priv(dev); 2196 DFX_board_t *bp = netdev_priv(dev);
2197 int i; /* used as index in for loop */ 2197 int i; /* used as index in for loop */
2198 struct dev_mc_list *dmi; /* ptr to multicast addr entry */ 2198 struct netdev_hw_addr *ha;
2199 2199
2200 /* Enable LLC frame promiscuous mode, if necessary */ 2200 /* Enable LLC frame promiscuous mode, if necessary */
2201 2201
@@ -2241,9 +2241,9 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
2241 /* Copy addresses to multicast address table, then update adapter CAM */ 2241 /* Copy addresses to multicast address table, then update adapter CAM */
2242 2242
2243 i = 0; 2243 i = 0;
2244 netdev_for_each_mc_addr(dmi, dev) 2244 netdev_for_each_mc_addr(ha, dev)
2245 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN], 2245 memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
2246 dmi->dmi_addr, FDDI_K_ALEN); 2246 ha->addr, FDDI_K_ALEN);
2247 2247
2248 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS) 2248 if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
2249 { 2249 {
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 744c1928dfca..a88300a0d1e8 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1272,7 +1272,7 @@ static void set_multicast_list(struct net_device *dev)
1272static void SetMulticastFilter(struct net_device *dev) 1272static void SetMulticastFilter(struct net_device *dev)
1273{ 1273{
1274 struct depca_private *lp = netdev_priv(dev); 1274 struct depca_private *lp = netdev_priv(dev);
1275 struct dev_mc_list *dmi; 1275 struct netdev_hw_addr *ha;
1276 char *addrs; 1276 char *addrs;
1277 int i, j, bit, byte; 1277 int i, j, bit, byte;
1278 u16 hashcode; 1278 u16 hashcode;
@@ -1287,8 +1287,8 @@ static void SetMulticastFilter(struct net_device *dev)
1287 lp->init_block.mcast_table[i] = 0; 1287 lp->init_block.mcast_table[i] = 0;
1288 } 1288 }
1289 /* Add multicast addresses */ 1289 /* Add multicast addresses */
1290 netdev_for_each_mc_addr(dmi, dev) { 1290 netdev_for_each_mc_addr(ha, dev) {
1291 addrs = dmi->dmi_addr; 1291 addrs = ha->addr;
1292 if ((*addrs & 0x01) == 1) { /* multicast address? */ 1292 if ((*addrs & 0x01) == 1) { /* multicast address? */
1293 crc = ether_crc(ETH_ALEN, addrs); 1293 crc = ether_crc(ETH_ALEN, addrs);
1294 hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */ 1294 hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index b05bad829827..6579225dbd91 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1132,14 +1132,14 @@ set_multicast (struct net_device *dev)
1132 /* Receive broadcast and multicast frames */ 1132 /* Receive broadcast and multicast frames */
1133 rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast; 1133 rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
1134 } else if (!netdev_mc_empty(dev)) { 1134 } else if (!netdev_mc_empty(dev)) {
1135 struct dev_mc_list *mclist; 1135 struct netdev_hw_addr *ha;
1136 /* Receive broadcast frames and multicast frames filtering 1136 /* Receive broadcast frames and multicast frames filtering
1137 by Hashtable */ 1137 by Hashtable */
1138 rx_mode = 1138 rx_mode =
1139 ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast; 1139 ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
1140 netdev_for_each_mc_addr(mclist, dev) { 1140 netdev_for_each_mc_addr(ha, dev) {
1141 int bit, index = 0; 1141 int bit, index = 0;
1142 int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr); 1142 int crc = ether_crc_le(ETH_ALEN, ha->addr);
1143 /* The inverted high significant 6 bits of CRC are 1143 /* The inverted high significant 6 bits of CRC are
1144 used as an index to hashtable */ 1144 used as an index to hashtable */
1145 for (bit = 0; bit < 6; bit++) 1145 for (bit = 0; bit < 6; bit++)
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 7f9960f718e3..a818ea998bbe 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -725,7 +725,7 @@ static void
725dm9000_hash_table(struct net_device *dev) 725dm9000_hash_table(struct net_device *dev)
726{ 726{
727 board_info_t *db = netdev_priv(dev); 727 board_info_t *db = netdev_priv(dev);
728 struct dev_mc_list *mcptr; 728 struct netdev_hw_addr *ha;
729 int i, oft; 729 int i, oft;
730 u32 hash_val; 730 u32 hash_val;
731 u16 hash_table[4]; 731 u16 hash_table[4];
@@ -753,8 +753,8 @@ dm9000_hash_table(struct net_device *dev)
753 rcr |= RCR_ALL; 753 rcr |= RCR_ALL;
754 754
755 /* the multicast address in Hash Table : 64 bits */ 755 /* the multicast address in Hash Table : 64 bits */
756 netdev_for_each_mc_addr(mcptr, dev) { 756 netdev_for_each_mc_addr(ha, dev) {
757 hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f; 757 hash_val = ether_crc_le(6, ha->addr) & 0x3f;
758 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 758 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
759 } 759 }
760 760
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c
index 234685213f1a..d51a83e69585 100644
--- a/drivers/net/dnet.c
+++ b/drivers/net/dnet.c
@@ -918,7 +918,7 @@ static int __devinit dnet_probe(struct platform_device *pdev)
918 918
919 dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", 919 dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
920 bp->regs, mem_base, dev->irq, dev->dev_addr); 920 bp->regs, mem_base, dev->irq, dev->dev_addr);
921 dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma \n", 921 dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
922 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", 922 (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
923 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", 923 (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
924 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", 924 (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ",
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index b997e578e58f..3e8d0005540f 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -147,6 +147,8 @@
147 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs 147 * - add clean lowlevel I/O emulation for cards with MII-lacking PHYs
148 */ 148 */
149 149
150#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
151
150#include <linux/module.h> 152#include <linux/module.h>
151#include <linux/moduleparam.h> 153#include <linux/moduleparam.h>
152#include <linux/kernel.h> 154#include <linux/kernel.h>
@@ -174,7 +176,6 @@
174#define DRV_VERSION "3.5.24-k2"DRV_EXT 176#define DRV_VERSION "3.5.24-k2"DRV_EXT
175#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver" 177#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
176#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation" 178#define DRV_COPYRIGHT "Copyright(c) 1999-2006 Intel Corporation"
177#define PFX DRV_NAME ": "
178 179
179#define E100_WATCHDOG_PERIOD (2 * HZ) 180#define E100_WATCHDOG_PERIOD (2 * HZ)
180#define E100_NAPI_WEIGHT 16 181#define E100_NAPI_WEIGHT 16
@@ -200,10 +201,6 @@ module_param(use_io, int, 0);
200MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); 201MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
201MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums"); 202MODULE_PARM_DESC(eeprom_bad_csum_allow, "Allow bad eeprom checksums");
202MODULE_PARM_DESC(use_io, "Force use of i/o access mode"); 203MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
203#define DPRINTK(nlevel, klevel, fmt, args...) \
204 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \
205 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \
206 __func__ , ## args))
207 204
208#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ 205#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
209 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \ 206 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
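
From here on the e100 diff is a logging conversion: the driver-private DPRINTK macro just removed (and its PFX prefix) give way to the generic netif_err()/netif_info()/netif_printk() helpers, which take the structure holding msg_enable plus the net_device directly, while the pr_fmt definition added before the includes prefixes any remaining pr_*-style output with the module name. The correspondence, sketched:

/* old, driver-private (removed above):
 *	DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n", r);
 * new, generic, gated on NETIF_MSG_HW in nic->msg_enable:
 */
netif_err(nic, hw, nic->netdev, "Self-test failed: result=0x%08X\n", r);

/* arbitrary level via netif_printk, as the patch uses for debug: */
netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
	     "phy_addr = %d\n", nic->mii.phy_id);
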
@@ -689,12 +686,13 @@ static int e100_self_test(struct nic *nic)
689 686
690 /* Check results of self-test */ 687 /* Check results of self-test */
691 if (nic->mem->selftest.result != 0) { 688 if (nic->mem->selftest.result != 0) {
692 DPRINTK(HW, ERR, "Self-test failed: result=0x%08X\n", 689 netif_err(nic, hw, nic->netdev,
693 nic->mem->selftest.result); 690 "Self-test failed: result=0x%08X\n",
691 nic->mem->selftest.result);
694 return -ETIMEDOUT; 692 return -ETIMEDOUT;
695 } 693 }
696 if (nic->mem->selftest.signature == 0) { 694 if (nic->mem->selftest.signature == 0) {
697 DPRINTK(HW, ERR, "Self-test failed: timed out\n"); 695 netif_err(nic, hw, nic->netdev, "Self-test failed: timed out\n");
698 return -ETIMEDOUT; 696 return -ETIMEDOUT;
699 } 697 }
700 698
@@ -797,7 +795,7 @@ static int e100_eeprom_load(struct nic *nic)
797 /* The checksum, stored in the last word, is calculated such that 795 /* The checksum, stored in the last word, is calculated such that
798 * the sum of words should be 0xBABA */ 796 * the sum of words should be 0xBABA */
799 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) { 797 if (cpu_to_le16(0xBABA - checksum) != nic->eeprom[nic->eeprom_wc - 1]) {
800 DPRINTK(PROBE, ERR, "EEPROM corrupted\n"); 798 netif_err(nic, probe, nic->netdev, "EEPROM corrupted\n");
801 if (!eeprom_bad_csum_allow) 799 if (!eeprom_bad_csum_allow)
802 return -EAGAIN; 800 return -EAGAIN;
803 } 801 }
@@ -953,8 +951,7 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
953 udelay(20); 951 udelay(20);
954 } 952 }
955 if (unlikely(!i)) { 953 if (unlikely(!i)) {
956 printk("e100.mdio_ctrl(%s) won't go Ready\n", 954 netdev_err(nic->netdev, "e100.mdio_ctrl won't go Ready\n");
957 nic->netdev->name );
958 spin_unlock_irqrestore(&nic->mdio_lock, flags); 955 spin_unlock_irqrestore(&nic->mdio_lock, flags);
959 return 0; /* No way to indicate timeout error */ 956 return 0; /* No way to indicate timeout error */
960 } 957 }
@@ -966,9 +963,10 @@ static u16 mdio_ctrl_hw(struct nic *nic, u32 addr, u32 dir, u32 reg, u16 data)
966 break; 963 break;
967 } 964 }
968 spin_unlock_irqrestore(&nic->mdio_lock, flags); 965 spin_unlock_irqrestore(&nic->mdio_lock, flags);
969 DPRINTK(HW, DEBUG, 966 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
970 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n", 967 "%s:addr=%d, reg=%d, data_in=0x%04X, data_out=0x%04X\n",
971 dir == mdi_read ? "READ" : "WRITE", addr, reg, data, data_out); 968 dir == mdi_read ? "READ" : "WRITE",
969 addr, reg, data, data_out);
972 return (u16)data_out; 970 return (u16)data_out;
973} 971}
974 972
@@ -1028,17 +1026,19 @@ static u16 mdio_ctrl_phy_mii_emulated(struct nic *nic,
1028 return ADVERTISE_10HALF | 1026 return ADVERTISE_10HALF |
1029 ADVERTISE_10FULL; 1027 ADVERTISE_10FULL;
1030 default: 1028 default:
1031 DPRINTK(HW, DEBUG, 1029 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1032 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", 1030 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1033 dir == mdi_read ? "READ" : "WRITE", addr, reg, data); 1031 dir == mdi_read ? "READ" : "WRITE",
1032 addr, reg, data);
1034 return 0xFFFF; 1033 return 0xFFFF;
1035 } 1034 }
1036 } else { 1035 } else {
1037 switch (reg) { 1036 switch (reg) {
1038 default: 1037 default:
1039 DPRINTK(HW, DEBUG, 1038 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1040 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n", 1039 "%s:addr=%d, reg=%d, data=0x%04X: unimplemented emulation!\n",
1041 dir == mdi_read ? "READ" : "WRITE", addr, reg, data); 1040 dir == mdi_read ? "READ" : "WRITE",
1041 addr, reg, data);
1042 return 0xFFFF; 1042 return 0xFFFF;
1043 } 1043 }
1044 } 1044 }
@@ -1155,12 +1155,15 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1155 } 1155 }
1156 } 1156 }
1157 1157
1158 DPRINTK(HW, DEBUG, "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1158 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1159 c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]); 1159 "[00-07]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1160 DPRINTK(HW, DEBUG, "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1160 c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
1161 c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]); 1161 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1162 DPRINTK(HW, DEBUG, "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n", 1162 "[08-15]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1163 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]); 1163 c[8], c[9], c[10], c[11], c[12], c[13], c[14], c[15]);
1164 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1165 "[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
1166 c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
1164} 1167}
1165 1168
1166/************************************************************************* 1169/*************************************************************************
@@ -1253,16 +1256,18 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1253 err = request_firmware(&fw, fw_name, &nic->pdev->dev); 1256 err = request_firmware(&fw, fw_name, &nic->pdev->dev);
1254 1257
1255 if (err) { 1258 if (err) {
1256 DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n", 1259 netif_err(nic, probe, nic->netdev,
1257 fw_name, err); 1260 "Failed to load firmware \"%s\": %d\n",
1261 fw_name, err);
1258 return ERR_PTR(err); 1262 return ERR_PTR(err);
1259 } 1263 }
1260 1264
1261 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes 1265 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
1262 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ 1266 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
1263 if (fw->size != UCODE_SIZE * 4 + 3) { 1267 if (fw->size != UCODE_SIZE * 4 + 3) {
1264 DPRINTK(PROBE, ERR, "Firmware \"%s\" has wrong size %zu\n", 1268 netif_err(nic, probe, nic->netdev,
1265 fw_name, fw->size); 1269 "Firmware \"%s\" has wrong size %zu\n",
1270 fw_name, fw->size);
1266 release_firmware(fw); 1271 release_firmware(fw);
1267 return ERR_PTR(-EINVAL); 1272 return ERR_PTR(-EINVAL);
1268 } 1273 }
@@ -1274,9 +1279,9 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1274 1279
1275 if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE || 1280 if (timer >= UCODE_SIZE || bundle >= UCODE_SIZE ||
1276 min_size >= UCODE_SIZE) { 1281 min_size >= UCODE_SIZE) {
1277 DPRINTK(PROBE, ERR, 1282 netif_err(nic, probe, nic->netdev,
1278 "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n", 1283 "\"%s\" has bogus offset values (0x%x,0x%x,0x%x)\n",
1279 fw_name, timer, bundle, min_size); 1284 fw_name, timer, bundle, min_size);
1280 release_firmware(fw); 1285 release_firmware(fw);
1281 return ERR_PTR(-EINVAL); 1286 return ERR_PTR(-EINVAL);
1282 } 1287 }
@@ -1328,7 +1333,8 @@ static inline int e100_load_ucode_wait(struct nic *nic)
1328 return PTR_ERR(fw); 1333 return PTR_ERR(fw);
1329 1334
1330 if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) 1335 if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode)))
1331 DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err); 1336 netif_err(nic, probe, nic->netdev,
1337 "ucode cmd failed with error %d\n", err);
1332 1338
1333 /* must restart cuc */ 1339 /* must restart cuc */
1334 nic->cuc_cmd = cuc_start; 1340 nic->cuc_cmd = cuc_start;
@@ -1348,7 +1354,7 @@ static inline int e100_load_ucode_wait(struct nic *nic)
1348 1354
1349 /* if the command failed, or is not OK, notify and return */ 1355 /* if the command failed, or is not OK, notify and return */
1350 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) { 1356 if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
1351 DPRINTK(PROBE,ERR, "ucode load failed\n"); 1357 netif_err(nic, probe, nic->netdev, "ucode load failed\n");
1352 err = -EPERM; 1358 err = -EPERM;
1353 } 1359 }
1354 1360
@@ -1386,8 +1392,8 @@ static int e100_phy_check_without_mii(struct nic *nic)
1386 * media is sensed automatically based on how the link partner 1392 * media is sensed automatically based on how the link partner
1387 * is configured. This is, in essence, manual configuration. 1393 * is configured. This is, in essence, manual configuration.
1388 */ 1394 */
1389 DPRINTK(PROBE, INFO, 1395 netif_info(nic, probe, nic->netdev,
1390 "found MII-less i82503 or 80c24 or other PHY\n"); 1396 "found MII-less i82503 or 80c24 or other PHY\n");
1391 1397
1392 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated; 1398 nic->mdio_ctrl = mdio_ctrl_phy_mii_emulated;
1393 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */ 1399 nic->mii.phy_id = 0; /* is this ok for an MII-less PHY? */
@@ -1434,18 +1440,20 @@ static int e100_phy_init(struct nic *nic)
1434 return 0; /* simply return and hope for the best */ 1440 return 0; /* simply return and hope for the best */
1435 else { 1441 else {
1436 /* for unknown cases log a fatal error */ 1442 /* for unknown cases log a fatal error */
1437 DPRINTK(HW, ERR, 1443 netif_err(nic, hw, nic->netdev,
1438 "Failed to locate any known PHY, aborting.\n"); 1444 "Failed to locate any known PHY, aborting\n");
1439 return -EAGAIN; 1445 return -EAGAIN;
1440 } 1446 }
1441 } else 1447 } else
1442 DPRINTK(HW, DEBUG, "phy_addr = %d\n", nic->mii.phy_id); 1448 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1449 "phy_addr = %d\n", nic->mii.phy_id);
1443 1450
1444 /* Get phy ID */ 1451 /* Get phy ID */
1445 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1); 1452 id_lo = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID1);
1446 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2); 1453 id_hi = mdio_read(netdev, nic->mii.phy_id, MII_PHYSID2);
1447 nic->phy = (u32)id_hi << 16 | (u32)id_lo; 1454 nic->phy = (u32)id_hi << 16 | (u32)id_lo;
1448 DPRINTK(HW, DEBUG, "phy ID = 0x%08X\n", nic->phy); 1455 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1456 "phy ID = 0x%08X\n", nic->phy);
1449 1457
1450 /* Select the phy and isolate the rest */ 1458 /* Select the phy and isolate the rest */
1451 for (addr = 0; addr < 32; addr++) { 1459 for (addr = 0; addr < 32; addr++) {
@@ -1507,7 +1515,7 @@ static int e100_hw_init(struct nic *nic)
1507 1515
1508 e100_hw_reset(nic); 1516 e100_hw_reset(nic);
1509 1517
1510 DPRINTK(HW, ERR, "e100_hw_init\n"); 1518 netif_err(nic, hw, nic->netdev, "e100_hw_init\n");
1511 if (!in_interrupt() && (err = e100_self_test(nic))) 1519 if (!in_interrupt() && (err = e100_self_test(nic)))
1512 return err; 1520 return err;
1513 1521
@@ -1537,16 +1545,16 @@ static int e100_hw_init(struct nic *nic)
1537static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb) 1545static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1538{ 1546{
1539 struct net_device *netdev = nic->netdev; 1547 struct net_device *netdev = nic->netdev;
1540 struct dev_mc_list *list; 1548 struct netdev_hw_addr *ha;
1541 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS); 1549 u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
1542 1550
1543 cb->command = cpu_to_le16(cb_multi); 1551 cb->command = cpu_to_le16(cb_multi);
1544 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN); 1552 cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
1545 i = 0; 1553 i = 0;
1546 netdev_for_each_mc_addr(list, netdev) { 1554 netdev_for_each_mc_addr(ha, netdev) {
1547 if (i == count) 1555 if (i == count)
1548 break; 1556 break;
1549 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr, 1557 memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
1550 ETH_ALEN); 1558 ETH_ALEN);
1551 } 1559 }
1552} 1560}
@@ -1555,8 +1563,9 @@ static void e100_set_multicast_list(struct net_device *netdev)
1555{ 1563{
1556 struct nic *nic = netdev_priv(netdev); 1564 struct nic *nic = netdev_priv(netdev);
1557 1565
1558 DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n", 1566 netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
1559 netdev_mc_count(netdev), netdev->flags); 1567 "mc_count=%d, flags=0x%04X\n",
1568 netdev_mc_count(netdev), netdev->flags);
1560 1569
1561 if (netdev->flags & IFF_PROMISC) 1570 if (netdev->flags & IFF_PROMISC)
1562 nic->flags |= promiscuous; 1571 nic->flags |= promiscuous;
@@ -1629,7 +1638,8 @@ static void e100_update_stats(struct nic *nic)
1629 1638
1630 1639
1631 if (e100_exec_cmd(nic, cuc_dump_reset, 0)) 1640 if (e100_exec_cmd(nic, cuc_dump_reset, 0))
1632 DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n"); 1641 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1642 "exec cuc_dump_reset failed\n");
1633} 1643}
1634 1644
1635static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex) 1645static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1659,20 +1669,19 @@ static void e100_watchdog(unsigned long data)
1659 struct nic *nic = (struct nic *)data; 1669 struct nic *nic = (struct nic *)data;
1660 struct ethtool_cmd cmd; 1670 struct ethtool_cmd cmd;
1661 1671
1662 DPRINTK(TIMER, DEBUG, "right now = %ld\n", jiffies); 1672 netif_printk(nic, timer, KERN_DEBUG, nic->netdev,
1673 "right now = %ld\n", jiffies);
1663 1674
1664 /* mii library handles link maintenance tasks */ 1675 /* mii library handles link maintenance tasks */
1665 1676
1666 mii_ethtool_gset(&nic->mii, &cmd); 1677 mii_ethtool_gset(&nic->mii, &cmd);
1667 1678
1668 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) { 1679 if (mii_link_ok(&nic->mii) && !netif_carrier_ok(nic->netdev)) {
1669 printk(KERN_INFO "e100: %s NIC Link is Up %s Mbps %s Duplex\n", 1680 netdev_info(nic->netdev, "NIC Link is Up %u Mbps %s Duplex\n",
1670 nic->netdev->name, 1681 cmd.speed == SPEED_100 ? 100 : 10,
1671 cmd.speed == SPEED_100 ? "100" : "10", 1682 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1672 cmd.duplex == DUPLEX_FULL ? "Full" : "Half");
1673 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) { 1683 } else if (!mii_link_ok(&nic->mii) && netif_carrier_ok(nic->netdev)) {
1674 printk(KERN_INFO "e100: %s NIC Link is Down\n", 1684 netdev_info(nic->netdev, "NIC Link is Down\n");
1675 nic->netdev->name);
1676 } 1685 }
1677 1686
1678 mii_check_link(&nic->mii); 1687 mii_check_link(&nic->mii);
@@ -1732,7 +1741,8 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1732 Issue a NOP command followed by a 1us delay before 1741 Issue a NOP command followed by a 1us delay before
1733 issuing the Tx command. */ 1742 issuing the Tx command. */
1734 if (e100_exec_cmd(nic, cuc_nop, 0)) 1743 if (e100_exec_cmd(nic, cuc_nop, 0))
1735 DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n"); 1744 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1745 "exec cuc_nop failed\n");
1736 udelay(1); 1746 udelay(1);
1737 } 1747 }
1738 1748
@@ -1741,12 +1751,14 @@ static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
1741 switch (err) { 1751 switch (err) {
1742 case -ENOSPC: 1752 case -ENOSPC:
1743 /* We queued the skb, but now we're out of space. */ 1753 /* We queued the skb, but now we're out of space. */
1744 DPRINTK(TX_ERR, DEBUG, "No space for CB\n"); 1754 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1755 "No space for CB\n");
1745 netif_stop_queue(netdev); 1756 netif_stop_queue(netdev);
1746 break; 1757 break;
1747 case -ENOMEM: 1758 case -ENOMEM:
1748 /* This is a hard error - log it. */ 1759 /* This is a hard error - log it. */
1749 DPRINTK(TX_ERR, DEBUG, "Out of Tx resources, returning skb\n"); 1760 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
1761 "Out of Tx resources, returning skb\n");
1750 netif_stop_queue(netdev); 1762 netif_stop_queue(netdev);
1751 return NETDEV_TX_BUSY; 1763 return NETDEV_TX_BUSY;
1752 } 1764 }
@@ -1767,9 +1779,10 @@ static int e100_tx_clean(struct nic *nic)
1767 for (cb = nic->cb_to_clean; 1779 for (cb = nic->cb_to_clean;
1768 cb->status & cpu_to_le16(cb_complete); 1780 cb->status & cpu_to_le16(cb_complete);
1769 cb = nic->cb_to_clean = cb->next) { 1781 cb = nic->cb_to_clean = cb->next) {
1770 DPRINTK(TX_DONE, DEBUG, "cb[%d]->status = 0x%04X\n", 1782 netif_printk(nic, tx_done, KERN_DEBUG, nic->netdev,
1771 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)), 1783 "cb[%d]->status = 0x%04X\n",
1772 cb->status); 1784 (int)(((void*)cb - (void*)nic->cbs)/sizeof(struct cb)),
1785 cb->status);
1773 1786
1774 if (likely(cb->skb != NULL)) { 1787 if (likely(cb->skb != NULL)) {
1775 dev->stats.tx_packets++; 1788 dev->stats.tx_packets++;
@@ -1912,7 +1925,8 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
1912 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL); 1925 sizeof(struct rfd), PCI_DMA_BIDIRECTIONAL);
1913 rfd_status = le16_to_cpu(rfd->status); 1926 rfd_status = le16_to_cpu(rfd->status);
1914 1927
1915 DPRINTK(RX_STATUS, DEBUG, "status=0x%04X\n", rfd_status); 1928 netif_printk(nic, rx_status, KERN_DEBUG, nic->netdev,
1929 "status=0x%04X\n", rfd_status);
1916 1930
1917 /* If data isn't ready, nothing to indicate */ 1931 /* If data isn't ready, nothing to indicate */
1918 if (unlikely(!(rfd_status & cb_complete))) { 1932 if (unlikely(!(rfd_status & cb_complete))) {
@@ -2123,7 +2137,8 @@ static irqreturn_t e100_intr(int irq, void *dev_id)
2123 struct nic *nic = netdev_priv(netdev); 2137 struct nic *nic = netdev_priv(netdev);
2124 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack); 2138 u8 stat_ack = ioread8(&nic->csr->scb.stat_ack);
2125 2139
2126 DPRINTK(INTR, DEBUG, "stat_ack = 0x%02X\n", stat_ack); 2140 netif_printk(nic, intr, KERN_DEBUG, nic->netdev,
2141 "stat_ack = 0x%02X\n", stat_ack);
2127 2142
2128 if (stat_ack == stat_ack_not_ours || /* Not our interrupt */ 2143 if (stat_ack == stat_ack_not_ours || /* Not our interrupt */
2129 stat_ack == stat_ack_not_present) /* Hardware is ejected */ 2144 stat_ack == stat_ack_not_present) /* Hardware is ejected */
@@ -2263,8 +2278,8 @@ static void e100_tx_timeout_task(struct work_struct *work)
2263 struct nic *nic = container_of(work, struct nic, tx_timeout_task); 2278 struct nic *nic = container_of(work, struct nic, tx_timeout_task);
2264 struct net_device *netdev = nic->netdev; 2279 struct net_device *netdev = nic->netdev;
2265 2280
2266 DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n", 2281 netif_printk(nic, tx_err, KERN_DEBUG, nic->netdev,
2267 ioread8(&nic->csr->scb.status)); 2282 "scb.status=0x%02X\n", ioread8(&nic->csr->scb.status));
2268 e100_down(netdev_priv(netdev)); 2283 e100_down(netdev_priv(netdev));
2269 e100_up(netdev_priv(netdev)); 2284 e100_up(netdev_priv(netdev));
2270} 2285}
@@ -2526,8 +2541,8 @@ static int e100_set_ringparam(struct net_device *netdev,
2526 rfds->count = min(rfds->count, rfds->max); 2541 rfds->count = min(rfds->count, rfds->max);
2527 cbs->count = max(ring->tx_pending, cbs->min); 2542 cbs->count = max(ring->tx_pending, cbs->min);
2528 cbs->count = min(cbs->count, cbs->max); 2543 cbs->count = min(cbs->count, cbs->max);
2529 DPRINTK(DRV, INFO, "Ring Param settings: rx: %d, tx %d\n", 2544 netif_info(nic, drv, nic->netdev, "Ring Param settings: rx: %d, tx %d\n",
2530 rfds->count, cbs->count); 2545 rfds->count, cbs->count);
2531 if (netif_running(netdev)) 2546 if (netif_running(netdev))
2532 e100_up(nic); 2547 e100_up(nic);
2533 2548
@@ -2704,7 +2719,7 @@ static int e100_open(struct net_device *netdev)
2704 2719
2705 netif_carrier_off(netdev); 2720 netif_carrier_off(netdev);
2706 if ((err = e100_up(nic))) 2721 if ((err = e100_up(nic)))
2707 DPRINTK(IFUP, ERR, "Cannot open interface, aborting.\n"); 2722 netif_err(nic, ifup, nic->netdev, "Cannot open interface, aborting\n");
2708 return err; 2723 return err;
2709} 2724}
2710 2725
@@ -2738,7 +2753,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2738 2753
2739 if (!(netdev = alloc_etherdev(sizeof(struct nic)))) { 2754 if (!(netdev = alloc_etherdev(sizeof(struct nic)))) {
2740 if (((1 << debug) - 1) & NETIF_MSG_PROBE) 2755 if (((1 << debug) - 1) & NETIF_MSG_PROBE)
2741 printk(KERN_ERR PFX "Etherdev alloc failed, abort.\n"); 2756 pr_err("Etherdev alloc failed, aborting\n");
2742 return -ENOMEM; 2757 return -ENOMEM;
2743 } 2758 }
2744 2759
@@ -2756,35 +2771,34 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2756 pci_set_drvdata(pdev, netdev); 2771 pci_set_drvdata(pdev, netdev);
2757 2772
2758 if ((err = pci_enable_device(pdev))) { 2773 if ((err = pci_enable_device(pdev))) {
2759 DPRINTK(PROBE, ERR, "Cannot enable PCI device, aborting.\n"); 2774 netif_err(nic, probe, nic->netdev, "Cannot enable PCI device, aborting\n");
2760 goto err_out_free_dev; 2775 goto err_out_free_dev;
2761 } 2776 }
2762 2777
2763 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 2778 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2764 DPRINTK(PROBE, ERR, "Cannot find proper PCI device " 2779 netif_err(nic, probe, nic->netdev, "Cannot find proper PCI device base address, aborting\n");
2765 "base address, aborting.\n");
2766 err = -ENODEV; 2780 err = -ENODEV;
2767 goto err_out_disable_pdev; 2781 goto err_out_disable_pdev;
2768 } 2782 }
2769 2783
2770 if ((err = pci_request_regions(pdev, DRV_NAME))) { 2784 if ((err = pci_request_regions(pdev, DRV_NAME))) {
2771 DPRINTK(PROBE, ERR, "Cannot obtain PCI resources, aborting.\n"); 2785 netif_err(nic, probe, nic->netdev, "Cannot obtain PCI resources, aborting\n");
2772 goto err_out_disable_pdev; 2786 goto err_out_disable_pdev;
2773 } 2787 }
2774 2788
2775 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) { 2789 if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))) {
2776 DPRINTK(PROBE, ERR, "No usable DMA configuration, aborting.\n"); 2790 netif_err(nic, probe, nic->netdev, "No usable DMA configuration, aborting\n");
2777 goto err_out_free_res; 2791 goto err_out_free_res;
2778 } 2792 }
2779 2793
2780 SET_NETDEV_DEV(netdev, &pdev->dev); 2794 SET_NETDEV_DEV(netdev, &pdev->dev);
2781 2795
2782 if (use_io) 2796 if (use_io)
2783 DPRINTK(PROBE, INFO, "using i/o access mode\n"); 2797 netif_info(nic, probe, nic->netdev, "using i/o access mode\n");
2784 2798
2785 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr)); 2799 nic->csr = pci_iomap(pdev, (use_io ? 1 : 0), sizeof(struct csr));
2786 if (!nic->csr) { 2800 if (!nic->csr) {
2787 DPRINTK(PROBE, ERR, "Cannot map device registers, aborting.\n"); 2801 netif_err(nic, probe, nic->netdev, "Cannot map device registers, aborting\n");
2788 err = -ENOMEM; 2802 err = -ENOMEM;
2789 goto err_out_free_res; 2803 goto err_out_free_res;
2790 } 2804 }
@@ -2818,7 +2832,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2818 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task); 2832 INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
2819 2833
2820 if ((err = e100_alloc(nic))) { 2834 if ((err = e100_alloc(nic))) {
2821 DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n"); 2835 netif_err(nic, probe, nic->netdev, "Cannot alloc driver memory, aborting\n");
2822 goto err_out_iounmap; 2836 goto err_out_iounmap;
2823 } 2837 }
2824 2838
@@ -2831,13 +2845,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2831 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN); 2845 memcpy(netdev->perm_addr, nic->eeprom, ETH_ALEN);
2832 if (!is_valid_ether_addr(netdev->perm_addr)) { 2846 if (!is_valid_ether_addr(netdev->perm_addr)) {
2833 if (!eeprom_bad_csum_allow) { 2847 if (!eeprom_bad_csum_allow) {
2834 DPRINTK(PROBE, ERR, "Invalid MAC address from " 2848 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, aborting\n");
2835 "EEPROM, aborting.\n");
2836 err = -EAGAIN; 2849 err = -EAGAIN;
2837 goto err_out_free; 2850 goto err_out_free;
2838 } else { 2851 } else {
2839 DPRINTK(PROBE, ERR, "Invalid MAC address from EEPROM, " 2852 netif_err(nic, probe, nic->netdev, "Invalid MAC address from EEPROM, you MUST configure one.\n");
2840 "you MUST configure one.\n");
2841 } 2853 }
2842 } 2854 }
2843 2855
@@ -2853,7 +2865,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2853 2865
2854 strcpy(netdev->name, "eth%d"); 2866 strcpy(netdev->name, "eth%d");
2855 if ((err = register_netdev(netdev))) { 2867 if ((err = register_netdev(netdev))) {
2856 DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n"); 2868 netif_err(nic, probe, nic->netdev, "Cannot register net device, aborting\n");
2857 goto err_out_free; 2869 goto err_out_free;
2858 } 2870 }
2859 nic->cbs_pool = pci_pool_create(netdev->name, 2871 nic->cbs_pool = pci_pool_create(netdev->name,
@@ -2861,9 +2873,10 @@ static int __devinit e100_probe(struct pci_dev *pdev,
2861 nic->params.cbs.max * sizeof(struct cb), 2873 nic->params.cbs.max * sizeof(struct cb),
2862 sizeof(u32), 2874 sizeof(u32),
2863 0); 2875 0);
2864 DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n", 2876 netif_info(nic, probe, nic->netdev,
2865 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0), 2877 "addr 0x%llx, irq %d, MAC addr %pM\n",
2866 pdev->irq, netdev->dev_addr); 2878 (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
2879 pdev->irq, netdev->dev_addr);
2867 2880
2868 return 0; 2881 return 0;
2869 2882
@@ -3021,7 +3034,7 @@ static pci_ers_result_t e100_io_slot_reset(struct pci_dev *pdev)
3021 struct nic *nic = netdev_priv(netdev); 3034 struct nic *nic = netdev_priv(netdev);
3022 3035
3023 if (pci_enable_device(pdev)) { 3036 if (pci_enable_device(pdev)) {
3024 printk(KERN_ERR "e100: Cannot re-enable PCI device after reset.\n"); 3037 pr_err("Cannot re-enable PCI device after reset\n");
3025 return PCI_ERS_RESULT_DISCONNECT; 3038 return PCI_ERS_RESULT_DISCONNECT;
3026 } 3039 }
3027 pci_set_master(pdev); 3040 pci_set_master(pdev);
@@ -3080,8 +3093,8 @@ static struct pci_driver e100_driver = {
3080static int __init e100_init_module(void) 3093static int __init e100_init_module(void)
3081{ 3094{
3082 if (((1 << debug) - 1) & NETIF_MSG_DRV) { 3095 if (((1 << debug) - 1) & NETIF_MSG_DRV) {
3083 printk(KERN_INFO PFX "%s, %s\n", DRV_DESCRIPTION, DRV_VERSION); 3096 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
3084 printk(KERN_INFO PFX "%s\n", DRV_COPYRIGHT); 3097 pr_info("%s\n", DRV_COPYRIGHT);
3085 } 3098 }
3086 return pci_register_driver(&e100_driver); 3099 return pci_register_driver(&e100_driver);
3087} 3100}
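
[editor's note] The e100.c hunks above are almost entirely mechanical: every driver-private DPRINTK(CATEGORY, LEVEL, ...) becomes a netif_err()/netif_info()/netif_printk() call, and bare printk()s gain pr_err()/pr_info() or netdev_info() forms. A minimal sketch of what the netif_* helpers assume — the struct and messages here are illustrative, not taken from the patch:

	#include <linux/netdevice.h>

	struct demo_priv {			/* any private struct works, as long  */
		struct net_device *netdev;	/* as it carries these two fields;    */
		u32 msg_enable;			/* the netif_*() macros read msg_enable */
	};

	static void log_example(struct demo_priv *priv)
	{
		/* printed only when NETIF_MSG_PROBE is set in priv->msg_enable */
		netif_info(priv, probe, priv->netdev, "using i/o access mode\n");

		/* generic form for levels without a shorthand wrapper */
		netif_printk(priv, tx_err, KERN_DEBUG, priv->netdev,
			     "exec cuc_nop failed\n");
	}

msg_enable is typically seeded from a module parameter via netif_msg_init() and toggled at runtime with ethtool's msglvl setting, which is what lets the per-driver DPRINTK machinery go away.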
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 8d7d87f12827..e2b6e6e7ba6a 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -990,7 +990,7 @@ static s32 e1000_copper_link_preconfig(struct e1000_hw *hw)
990 DEBUGOUT("Error, did not detect valid phy.\n"); 990 DEBUGOUT("Error, did not detect valid phy.\n");
991 return ret_val; 991 return ret_val;
992 } 992 }
993 DEBUGOUT1("Phy ID = %x \n", hw->phy_id); 993 DEBUGOUT1("Phy ID = %x\n", hw->phy_id);
994 994
995 /* Set PHY to class A mode (if necessary) */ 995 /* Set PHY to class A mode (if necessary) */
996 ret_val = e1000_set_phy_mode(hw); 996 ret_val = e1000_set_phy_mode(hw);
@@ -1680,7 +1680,7 @@ static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
1680 if (ret_val) 1680 if (ret_val)
1681 return ret_val; 1681 return ret_val;
1682 1682
1683 DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data); 1683 DEBUGOUT1("M88E1000 PSCR: %x\n", phy_data);
1684 1684
1685 /* Need to reset the PHY or these changes will be ignored */ 1685 /* Need to reset the PHY or these changes will be ignored */
1686 mii_ctrl_reg |= MII_CR_RESET; 1686 mii_ctrl_reg |= MII_CR_RESET;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index b15ece26ed84..974a02d81823 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -2098,7 +2098,6 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2098 struct e1000_hw *hw = &adapter->hw; 2098 struct e1000_hw *hw = &adapter->hw;
2099 struct netdev_hw_addr *ha; 2099 struct netdev_hw_addr *ha;
2100 bool use_uc = false; 2100 bool use_uc = false;
2101 struct dev_addr_list *mc_ptr;
2102 u32 rctl; 2101 u32 rctl;
2103 u32 hash_value; 2102 u32 hash_value;
2104 int i, rar_entries = E1000_RAR_ENTRIES; 2103 int i, rar_entries = E1000_RAR_ENTRIES;
@@ -2158,17 +2157,17 @@ static void e1000_set_rx_mode(struct net_device *netdev)
2158 2157
2159 WARN_ON(i == rar_entries); 2158 WARN_ON(i == rar_entries);
2160 2159
2161 netdev_for_each_mc_addr(mc_ptr, netdev) { 2160 netdev_for_each_mc_addr(ha, netdev) {
2162 if (i == rar_entries) { 2161 if (i == rar_entries) {
2163 /* load any remaining addresses into the hash table */ 2162 /* load any remaining addresses into the hash table */
2164 u32 hash_reg, hash_bit, mta; 2163 u32 hash_reg, hash_bit, mta;
2165 hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr); 2164 hash_value = e1000_hash_mc_addr(hw, ha->addr);
2166 hash_reg = (hash_value >> 5) & 0x7F; 2165 hash_reg = (hash_value >> 5) & 0x7F;
2167 hash_bit = hash_value & 0x1F; 2166 hash_bit = hash_value & 0x1F;
2168 mta = (1 << hash_bit); 2167 mta = (1 << hash_bit);
2169 mcarray[hash_reg] |= mta; 2168 mcarray[hash_reg] |= mta;
2170 } else { 2169 } else {
2171 e1000_rar_set(hw, mc_ptr->da_addr, i++); 2170 e1000_rar_set(hw, ha->addr, i++);
2172 } 2171 }
2173 } 2172 }
2174 2173
@@ -2930,7 +2929,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
2930 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD; 2929 unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
2931 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 2930 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
2932 unsigned int tx_flags = 0; 2931 unsigned int tx_flags = 0;
2933 unsigned int len = skb->len - skb->data_len; 2932 unsigned int len = skb_headlen(skb);
2934 unsigned int nr_frags; 2933 unsigned int nr_frags;
2935 unsigned int mss; 2934 unsigned int mss;
2936 int count = 0; 2935 int count = 0;
@@ -2981,7 +2980,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
2981 dev_kfree_skb_any(skb); 2980 dev_kfree_skb_any(skb);
2982 return NETDEV_TX_OK; 2981 return NETDEV_TX_OK;
2983 } 2982 }
2984 len = skb->len - skb->data_len; 2983 len = skb_headlen(skb);
2985 break; 2984 break;
2986 default: 2985 default:
2987 /* do nothing */ 2986 /* do nothing */
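
[editor's note] Two conversions in the e1000 hunks recur through the rest of this series: the multicast walk moves from struct dev_mc_list with its dmi_addr/da_addr fields to struct netdev_hw_addr with ha->addr, and the open-coded skb->len - skb->data_len becomes skb_headlen(). A sketch of both; copy_mc_filter() is a made-up helper name for illustration:

	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>
	#include <linux/skbuff.h>
	#include <linux/string.h>

	static int copy_mc_filter(struct net_device *netdev, u8 *mta_list)
	{
		struct netdev_hw_addr *ha;	/* replaces struct dev_mc_list */
		int i = 0;

		netdev_for_each_mc_addr(ha, netdev)
			memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

		return i;			/* number of addresses packed */
	}

	static unsigned int linear_bytes(const struct sk_buff *skb)
	{
		return skb_headlen(skb);	/* == skb->len - skb->data_len */
	}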
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 712ccc66ba25..4b0016d69530 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -323,7 +323,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
323 } 323 }
324 324
325 /* 325 /*
326 * Initialze device specific counter of SMBI acquisition 326 * Initialize device specific counter of SMBI acquisition
327 * timeouts. 327 * timeouts.
328 */ 328 */
329 hw->dev_spec.e82571.smb_counter = 0; 329 hw->dev_spec.e82571.smb_counter = 0;
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 118bdf483593..12648a1cdb78 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -42,25 +42,16 @@
42 42
43struct e1000_info; 43struct e1000_info;
44 44
45#define e_printk(level, adapter, format, arg...) \
46 printk(level "%s: %s: " format, pci_name(adapter->pdev), \
47 adapter->netdev->name, ## arg)
48
49#ifdef DEBUG
50#define e_dbg(format, arg...) \ 45#define e_dbg(format, arg...) \
51 e_printk(KERN_DEBUG , hw->adapter, format, ## arg) 46 netdev_dbg(hw->adapter->netdev, format, ## arg)
52#else
53#define e_dbg(format, arg...) do { (void)(hw); } while (0)
54#endif
55
56#define e_err(format, arg...) \ 47#define e_err(format, arg...) \
57 e_printk(KERN_ERR, adapter, format, ## arg) 48 netdev_err(adapter->netdev, format, ## arg)
58#define e_info(format, arg...) \ 49#define e_info(format, arg...) \
59 e_printk(KERN_INFO, adapter, format, ## arg) 50 netdev_info(adapter->netdev, format, ## arg)
60#define e_warn(format, arg...) \ 51#define e_warn(format, arg...) \
61 e_printk(KERN_WARNING, adapter, format, ## arg) 52 netdev_warn(adapter->netdev, format, ## arg)
62#define e_notice(format, arg...) \ 53#define e_notice(format, arg...) \
63 e_printk(KERN_NOTICE, adapter, format, ## arg) 54 netdev_notice(adapter->netdev, format, ## arg)
64 55
65 56
66/* Interrupt modes, as used by the IntMode parameter */ 57/* Interrupt modes, as used by the IntMode parameter */
@@ -158,6 +149,9 @@ struct e1000_info;
158#define HV_M_STATUS_SPEED_1000 0x0200 149#define HV_M_STATUS_SPEED_1000 0x0200
159#define HV_M_STATUS_LINK_UP 0x0040 150#define HV_M_STATUS_LINK_UP 0x0040
160 151
152/* Time to wait before putting the device into D3 if there's no link (in ms). */
153#define LINK_TIMEOUT 100
154
161enum e1000_boards { 155enum e1000_boards {
162 board_82571, 156 board_82571,
163 board_82572, 157 board_82572,
@@ -369,6 +363,8 @@ struct e1000_adapter {
369 struct work_struct update_phy_task; 363 struct work_struct update_phy_task;
370 struct work_struct led_blink_task; 364 struct work_struct led_blink_task;
371 struct work_struct print_hang_task; 365 struct work_struct print_hang_task;
366
367 bool idle_check;
372}; 368};
373 369
374struct e1000_info { 370struct e1000_info {
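
[editor's note] The macro rework above is why the old #ifdef DEBUG wrapper could be dropped: netdev_dbg() already compiles away unless DEBUG or dynamic debug is enabled, and the netdev_* family prefixes each message with the driver and interface name on its own, replacing the hand-rolled e_printk(). A sketch of the resulting call sites (the output format shown is approximate):

	static void report(struct e1000_adapter *adapter)
	{
		/* prints roughly: "e1000e 0000:00:19.0: eth0: link is down" */
		netdev_err(adapter->netdev, "link is down\n");

		/* no-op unless DEBUG / CONFIG_DYNAMIC_DEBUG is active */
		netdev_dbg(adapter->netdev, "interrupt throttle updated\n");
	}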
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b5e157e9c87..5059c22155d9 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -1622,7 +1622,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
1622 /* Check if the flash descriptor is valid */ 1622 /* Check if the flash descriptor is valid */
1623 if (hsfsts.hsf_status.fldesvalid == 0) { 1623 if (hsfsts.hsf_status.fldesvalid == 0) {
1624 e_dbg("Flash descriptor invalid. " 1624 e_dbg("Flash descriptor invalid. "
1625 "SW Sequencing must be used."); 1625 "SW Sequencing must be used.\n");
1626 return -E1000_ERR_NVM; 1626 return -E1000_ERR_NVM;
1627 } 1627 }
1628 1628
@@ -1671,7 +1671,7 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
1671 hsfsts.hsf_status.flcdone = 1; 1671 hsfsts.hsf_status.flcdone = 1;
1672 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval); 1672 ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
1673 } else { 1673 } else {
1674 e_dbg("Flash controller busy, cannot get access"); 1674 e_dbg("Flash controller busy, cannot get access\n");
1675 } 1675 }
1676 } 1676 }
1677 1677
@@ -1822,7 +1822,7 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
1822 continue; 1822 continue;
1823 } else if (hsfsts.hsf_status.flcdone == 0) { 1823 } else if (hsfsts.hsf_status.flcdone == 0) {
1824 e_dbg("Timeout error - flash cycle " 1824 e_dbg("Timeout error - flash cycle "
1825 "did not complete."); 1825 "did not complete.\n");
1826 break; 1826 break;
1827 } 1827 }
1828 } 1828 }
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index a8b2c0de27c4..b0d2a60aa490 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -1262,24 +1262,21 @@ s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *dup
1262 u32 status; 1262 u32 status;
1263 1263
1264 status = er32(STATUS); 1264 status = er32(STATUS);
1265 if (status & E1000_STATUS_SPEED_1000) { 1265 if (status & E1000_STATUS_SPEED_1000)
1266 *speed = SPEED_1000; 1266 *speed = SPEED_1000;
1267 e_dbg("1000 Mbs, "); 1267 else if (status & E1000_STATUS_SPEED_100)
1268 } else if (status & E1000_STATUS_SPEED_100) {
1269 *speed = SPEED_100; 1268 *speed = SPEED_100;
1270 e_dbg("100 Mbs, "); 1269 else
1271 } else {
1272 *speed = SPEED_10; 1270 *speed = SPEED_10;
1273 e_dbg("10 Mbs, ");
1274 }
1275 1271
1276 if (status & E1000_STATUS_FD) { 1272 if (status & E1000_STATUS_FD)
1277 *duplex = FULL_DUPLEX; 1273 *duplex = FULL_DUPLEX;
1278 e_dbg("Full Duplex\n"); 1274 else
1279 } else {
1280 *duplex = HALF_DUPLEX; 1275 *duplex = HALF_DUPLEX;
1281 e_dbg("Half Duplex\n"); 1276
1282 } 1277 e_dbg("%u Mbps, %s Duplex\n",
1278 *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
1279 *duplex == FULL_DUPLEX ? "Full" : "Half");
1283 1280
1284 return 0; 1281 return 0;
1285} 1282}
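
[editor's note] Besides removing lines, the lib.c hunk changes the logging shape: the old code emitted partial fragments ("1000 Mbs, ", then "Full Duplex\n") from inside each branch, which can interleave with messages from other CPUs; the new code decodes first and prints one complete line. A sketch of the decode-then-log ordering, assuming the usual e1000 SPEED_*/FULL_DUPLEX constants:

	static void log_speed_duplex(struct e1000_hw *hw, u16 speed, u16 duplex)
	{
		/* one self-contained line; no interleaved fragments
		 * (e_dbg() resolves through hw->adapter->netdev, per the
		 * macro change earlier in this patch) */
		e_dbg("%u Mbps, %s Duplex\n", speed,
		      duplex == FULL_DUPLEX ? "Full" : "Half");
	}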
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 73d43c53015a..5f70c437fa41 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -26,6 +26,8 @@
26 26
27*******************************************************************************/ 27*******************************************************************************/
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include <linux/module.h> 31#include <linux/module.h>
30#include <linux/types.h> 32#include <linux/types.h>
31#include <linux/init.h> 33#include <linux/init.h>
@@ -45,6 +47,7 @@
45#include <linux/cpu.h> 47#include <linux/cpu.h>
46#include <linux/smp.h> 48#include <linux/smp.h>
47#include <linux/pm_qos_params.h> 49#include <linux/pm_qos_params.h>
50#include <linux/pm_runtime.h>
48#include <linux/aer.h> 51#include <linux/aer.h>
49 52
50#include "e1000.h" 53#include "e1000.h"
@@ -2565,7 +2568,7 @@ static void e1000_set_multi(struct net_device *netdev)
2565{ 2568{
2566 struct e1000_adapter *adapter = netdev_priv(netdev); 2569 struct e1000_adapter *adapter = netdev_priv(netdev);
2567 struct e1000_hw *hw = &adapter->hw; 2570 struct e1000_hw *hw = &adapter->hw;
2568 struct dev_mc_list *mc_ptr; 2571 struct netdev_hw_addr *ha;
2569 u8 *mta_list; 2572 u8 *mta_list;
2570 u32 rctl; 2573 u32 rctl;
2571 int i; 2574 int i;
@@ -2597,9 +2600,8 @@ static void e1000_set_multi(struct net_device *netdev)
2597 2600
2598 /* prepare a packed array of only addresses. */ 2601 /* prepare a packed array of only addresses. */
2599 i = 0; 2602 i = 0;
2600 netdev_for_each_mc_addr(mc_ptr, netdev) 2603 netdev_for_each_mc_addr(ha, netdev)
2601 memcpy(mta_list + (i++ * ETH_ALEN), 2604 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
2602 mc_ptr->dmi_addr, ETH_ALEN);
2603 2605
2604 e1000_update_mc_addr_list(hw, mta_list, i); 2606 e1000_update_mc_addr_list(hw, mta_list, i);
2605 kfree(mta_list); 2607 kfree(mta_list);
@@ -3083,12 +3085,15 @@ static int e1000_open(struct net_device *netdev)
3083{ 3085{
3084 struct e1000_adapter *adapter = netdev_priv(netdev); 3086 struct e1000_adapter *adapter = netdev_priv(netdev);
3085 struct e1000_hw *hw = &adapter->hw; 3087 struct e1000_hw *hw = &adapter->hw;
3088 struct pci_dev *pdev = adapter->pdev;
3086 int err; 3089 int err;
3087 3090
3088 /* disallow open during test */ 3091 /* disallow open during test */
3089 if (test_bit(__E1000_TESTING, &adapter->state)) 3092 if (test_bit(__E1000_TESTING, &adapter->state))
3090 return -EBUSY; 3093 return -EBUSY;
3091 3094
3095 pm_runtime_get_sync(&pdev->dev);
3096
3092 netif_carrier_off(netdev); 3097 netif_carrier_off(netdev);
3093 3098
3094 /* allocate transmit descriptors */ 3099 /* allocate transmit descriptors */
@@ -3149,6 +3154,9 @@ static int e1000_open(struct net_device *netdev)
3149 3154
3150 netif_start_queue(netdev); 3155 netif_start_queue(netdev);
3151 3156
3157 adapter->idle_check = true;
3158 pm_runtime_put(&pdev->dev);
3159
3152 /* fire a link status change interrupt to start the watchdog */ 3160 /* fire a link status change interrupt to start the watchdog */
3153 ew32(ICS, E1000_ICS_LSC); 3161 ew32(ICS, E1000_ICS_LSC);
3154 3162
@@ -3162,6 +3170,7 @@ err_setup_rx:
3162 e1000e_free_tx_resources(adapter); 3170 e1000e_free_tx_resources(adapter);
3163err_setup_tx: 3171err_setup_tx:
3164 e1000e_reset(adapter); 3172 e1000e_reset(adapter);
3173 pm_runtime_put_sync(&pdev->dev);
3165 3174
3166 return err; 3175 return err;
3167} 3176}
@@ -3180,11 +3189,17 @@ err_setup_tx:
3180static int e1000_close(struct net_device *netdev) 3189static int e1000_close(struct net_device *netdev)
3181{ 3190{
3182 struct e1000_adapter *adapter = netdev_priv(netdev); 3191 struct e1000_adapter *adapter = netdev_priv(netdev);
3192 struct pci_dev *pdev = adapter->pdev;
3183 3193
3184 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state)); 3194 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
3185 e1000e_down(adapter); 3195
3196 pm_runtime_get_sync(&pdev->dev);
3197
3198 if (!test_bit(__E1000_DOWN, &adapter->state)) {
3199 e1000e_down(adapter);
3200 e1000_free_irq(adapter);
3201 }
3186 e1000_power_down_phy(adapter); 3202 e1000_power_down_phy(adapter);
3187 e1000_free_irq(adapter);
3188 3203
3189 e1000e_free_tx_resources(adapter); 3204 e1000e_free_tx_resources(adapter);
3190 e1000e_free_rx_resources(adapter); 3205 e1000e_free_rx_resources(adapter);
@@ -3206,6 +3221,8 @@ static int e1000_close(struct net_device *netdev)
3206 if (adapter->flags & FLAG_HAS_AMT) 3221 if (adapter->flags & FLAG_HAS_AMT)
3207 e1000_release_hw_control(adapter); 3222 e1000_release_hw_control(adapter);
3208 3223
3224 pm_runtime_put_sync(&pdev->dev);
3225
3209 return 0; 3226 return 0;
3210} 3227}
3211/** 3228/**
@@ -3550,6 +3567,9 @@ static void e1000_watchdog_task(struct work_struct *work)
3550 3567
3551 link = e1000e_has_link(adapter); 3568 link = e1000e_has_link(adapter);
3552 if ((netif_carrier_ok(netdev)) && link) { 3569 if ((netif_carrier_ok(netdev)) && link) {
3570 /* Cancel scheduled suspend requests. */
3571 pm_runtime_resume(netdev->dev.parent);
3572
3553 e1000e_enable_receives(adapter); 3573 e1000e_enable_receives(adapter);
3554 goto link_up; 3574 goto link_up;
3555 } 3575 }
@@ -3561,6 +3581,10 @@ static void e1000_watchdog_task(struct work_struct *work)
3561 if (link) { 3581 if (link) {
3562 if (!netif_carrier_ok(netdev)) { 3582 if (!netif_carrier_ok(netdev)) {
3563 bool txb2b = 1; 3583 bool txb2b = 1;
3584
3585 /* Cancel scheduled suspend requests. */
3586 pm_runtime_resume(netdev->dev.parent);
3587
3564 /* update snapshot of PHY registers on LSC */ 3588 /* update snapshot of PHY registers on LSC */
3565 e1000_phy_read_status(adapter); 3589 e1000_phy_read_status(adapter);
3566 mac->ops.get_link_up_info(&adapter->hw, 3590 mac->ops.get_link_up_info(&adapter->hw,
@@ -3670,6 +3694,9 @@ static void e1000_watchdog_task(struct work_struct *work)
3670 3694
3671 if (adapter->flags & FLAG_RX_NEEDS_RESTART) 3695 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
3672 schedule_work(&adapter->reset_task); 3696 schedule_work(&adapter->reset_task);
3697 else
3698 pm_schedule_suspend(netdev->dev.parent,
3699 LINK_TIMEOUT);
3673 } 3700 }
3674 } 3701 }
3675 3702
@@ -4105,7 +4132,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4105 unsigned int max_per_txd = E1000_MAX_PER_TXD; 4132 unsigned int max_per_txd = E1000_MAX_PER_TXD;
4106 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR; 4133 unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
4107 unsigned int tx_flags = 0; 4134 unsigned int tx_flags = 0;
4108 unsigned int len = skb->len - skb->data_len; 4135 unsigned int len = skb_headlen(skb);
4109 unsigned int nr_frags; 4136 unsigned int nr_frags;
4110 unsigned int mss; 4137 unsigned int mss;
4111 int count = 0; 4138 int count = 0;
@@ -4155,7 +4182,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
4155 dev_kfree_skb_any(skb); 4182 dev_kfree_skb_any(skb);
4156 return NETDEV_TX_OK; 4183 return NETDEV_TX_OK;
4157 } 4184 }
4158 len = skb->len - skb->data_len; 4185 len = skb_headlen(skb);
4159 } 4186 }
4160 } 4187 }
4161 4188
@@ -4467,13 +4494,15 @@ out:
4467 return retval; 4494 return retval;
4468} 4495}
4469 4496
4470static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake) 4497static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
4498 bool runtime)
4471{ 4499{
4472 struct net_device *netdev = pci_get_drvdata(pdev); 4500 struct net_device *netdev = pci_get_drvdata(pdev);
4473 struct e1000_adapter *adapter = netdev_priv(netdev); 4501 struct e1000_adapter *adapter = netdev_priv(netdev);
4474 struct e1000_hw *hw = &adapter->hw; 4502 struct e1000_hw *hw = &adapter->hw;
4475 u32 ctrl, ctrl_ext, rctl, status; 4503 u32 ctrl, ctrl_ext, rctl, status;
4476 u32 wufc = adapter->wol; 4504 /* Runtime suspend should only enable wakeup for link changes */
4505 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
4477 int retval = 0; 4506 int retval = 0;
4478 4507
4479 netif_device_detach(netdev); 4508 netif_device_detach(netdev);
@@ -4630,43 +4659,21 @@ static void e1000e_disable_l1aspm(struct pci_dev *pdev)
4630 } 4659 }
4631} 4660}
4632 4661
4633#ifdef CONFIG_PM 4662#ifdef CONFIG_PM_OPS
4634static int e1000_suspend(struct pci_dev *pdev, pm_message_t state) 4663static bool e1000e_pm_ready(struct e1000_adapter *adapter)
4635{ 4664{
4636 int retval; 4665 return !!adapter->tx_ring->buffer_info;
4637 bool wake;
4638
4639 retval = __e1000_shutdown(pdev, &wake);
4640 if (!retval)
4641 e1000_complete_shutdown(pdev, true, wake);
4642
4643 return retval;
4644} 4666}
4645 4667
4646static int e1000_resume(struct pci_dev *pdev) 4668static int __e1000_resume(struct pci_dev *pdev)
4647{ 4669{
4648 struct net_device *netdev = pci_get_drvdata(pdev); 4670 struct net_device *netdev = pci_get_drvdata(pdev);
4649 struct e1000_adapter *adapter = netdev_priv(netdev); 4671 struct e1000_adapter *adapter = netdev_priv(netdev);
4650 struct e1000_hw *hw = &adapter->hw; 4672 struct e1000_hw *hw = &adapter->hw;
4651 u32 err; 4673 u32 err;
4652 4674
4653 pci_set_power_state(pdev, PCI_D0);
4654 pci_restore_state(pdev);
4655 pci_save_state(pdev);
4656 e1000e_disable_l1aspm(pdev); 4675 e1000e_disable_l1aspm(pdev);
4657 4676
4658 err = pci_enable_device_mem(pdev);
4659 if (err) {
4660 dev_err(&pdev->dev,
4661 "Cannot enable PCI device from suspend\n");
4662 return err;
4663 }
4664
4665 pci_set_master(pdev);
4666
4667 pci_enable_wake(pdev, PCI_D3hot, 0);
4668 pci_enable_wake(pdev, PCI_D3cold, 0);
4669
4670 e1000e_set_interrupt_capability(adapter); 4677 e1000e_set_interrupt_capability(adapter);
4671 if (netif_running(netdev)) { 4678 if (netif_running(netdev)) {
4672 err = e1000_request_irq(adapter); 4679 err = e1000_request_irq(adapter);
@@ -4724,13 +4731,88 @@ static int e1000_resume(struct pci_dev *pdev)
4724 4731
4725 return 0; 4732 return 0;
4726} 4733}
4727#endif 4734
4735#ifdef CONFIG_PM_SLEEP
4736static int e1000_suspend(struct device *dev)
4737{
4738 struct pci_dev *pdev = to_pci_dev(dev);
4739 int retval;
4740 bool wake;
4741
4742 retval = __e1000_shutdown(pdev, &wake, false);
4743 if (!retval)
4744 e1000_complete_shutdown(pdev, true, wake);
4745
4746 return retval;
4747}
4748
4749static int e1000_resume(struct device *dev)
4750{
4751 struct pci_dev *pdev = to_pci_dev(dev);
4752 struct net_device *netdev = pci_get_drvdata(pdev);
4753 struct e1000_adapter *adapter = netdev_priv(netdev);
4754
4755 if (e1000e_pm_ready(adapter))
4756 adapter->idle_check = true;
4757
4758 return __e1000_resume(pdev);
4759}
4760#endif /* CONFIG_PM_SLEEP */
4761
4762#ifdef CONFIG_PM_RUNTIME
4763static int e1000_runtime_suspend(struct device *dev)
4764{
4765 struct pci_dev *pdev = to_pci_dev(dev);
4766 struct net_device *netdev = pci_get_drvdata(pdev);
4767 struct e1000_adapter *adapter = netdev_priv(netdev);
4768
4769 if (e1000e_pm_ready(adapter)) {
4770 bool wake;
4771
4772 __e1000_shutdown(pdev, &wake, true);
4773 }
4774
4775 return 0;
4776}
4777
4778static int e1000_idle(struct device *dev)
4779{
4780 struct pci_dev *pdev = to_pci_dev(dev);
4781 struct net_device *netdev = pci_get_drvdata(pdev);
4782 struct e1000_adapter *adapter = netdev_priv(netdev);
4783
4784 if (!e1000e_pm_ready(adapter))
4785 return 0;
4786
4787 if (adapter->idle_check) {
4788 adapter->idle_check = false;
4789 if (!e1000e_has_link(adapter))
4790 pm_schedule_suspend(dev, MSEC_PER_SEC);
4791 }
4792
4793 return -EBUSY;
4794}
4795
4796static int e1000_runtime_resume(struct device *dev)
4797{
4798 struct pci_dev *pdev = to_pci_dev(dev);
4799 struct net_device *netdev = pci_get_drvdata(pdev);
4800 struct e1000_adapter *adapter = netdev_priv(netdev);
4801
4802 if (!e1000e_pm_ready(adapter))
4803 return 0;
4804
4805 adapter->idle_check = !dev->power.runtime_auto;
4806 return __e1000_resume(pdev);
4807}
4808#endif /* CONFIG_PM_RUNTIME */
4809#endif /* CONFIG_PM_OPS */
4728 4810
4729static void e1000_shutdown(struct pci_dev *pdev) 4811static void e1000_shutdown(struct pci_dev *pdev)
4730{ 4812{
4731 bool wake = false; 4813 bool wake = false;
4732 4814
4733 __e1000_shutdown(pdev, &wake); 4815 __e1000_shutdown(pdev, &wake, false);
4734 4816
4735 if (system_state == SYSTEM_POWER_OFF) 4817 if (system_state == SYSTEM_POWER_OFF)
4736 e1000_complete_shutdown(pdev, false, wake); 4818 e1000_complete_shutdown(pdev, false, wake);
@@ -4803,8 +4885,8 @@ static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
4803 result = PCI_ERS_RESULT_DISCONNECT; 4885 result = PCI_ERS_RESULT_DISCONNECT;
4804 } else { 4886 } else {
4805 pci_set_master(pdev); 4887 pci_set_master(pdev);
4888 pdev->state_saved = true;
4806 pci_restore_state(pdev); 4889 pci_restore_state(pdev);
4807 pci_save_state(pdev);
4808 4890
4809 pci_enable_wake(pdev, PCI_D3hot, 0); 4891 pci_enable_wake(pdev, PCI_D3hot, 0);
4810 pci_enable_wake(pdev, PCI_D3cold, 0); 4892 pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -5211,6 +5293,12 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5211 5293
5212 e1000_print_device_info(adapter); 5294 e1000_print_device_info(adapter);
5213 5295
5296 if (pci_dev_run_wake(pdev)) {
5297 pm_runtime_set_active(&pdev->dev);
5298 pm_runtime_enable(&pdev->dev);
5299 }
5300 pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
5301
5214 return 0; 5302 return 0;
5215 5303
5216err_register: 5304err_register:
@@ -5253,12 +5341,16 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5253{ 5341{
5254 struct net_device *netdev = pci_get_drvdata(pdev); 5342 struct net_device *netdev = pci_get_drvdata(pdev);
5255 struct e1000_adapter *adapter = netdev_priv(netdev); 5343 struct e1000_adapter *adapter = netdev_priv(netdev);
5344 bool down = test_bit(__E1000_DOWN, &adapter->state);
5345
5346 pm_runtime_get_sync(&pdev->dev);
5256 5347
5257 /* 5348 /*
5258 * flush_scheduled work may reschedule our watchdog task, so 5349 * flush_scheduled work may reschedule our watchdog task, so
5259 * explicitly disable watchdog tasks from being rescheduled 5350 * explicitly disable watchdog tasks from being rescheduled
5260 */ 5351 */
5261 set_bit(__E1000_DOWN, &adapter->state); 5352 if (!down)
5353 set_bit(__E1000_DOWN, &adapter->state);
5262 del_timer_sync(&adapter->watchdog_timer); 5354 del_timer_sync(&adapter->watchdog_timer);
5263 del_timer_sync(&adapter->phy_info_timer); 5355 del_timer_sync(&adapter->phy_info_timer);
5264 5356
@@ -5272,8 +5364,17 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5272 if (!(netdev->flags & IFF_UP)) 5364 if (!(netdev->flags & IFF_UP))
5273 e1000_power_down_phy(adapter); 5365 e1000_power_down_phy(adapter);
5274 5366
5367 /* Don't lie to e1000_close() down the road. */
5368 if (!down)
5369 clear_bit(__E1000_DOWN, &adapter->state);
5275 unregister_netdev(netdev); 5370 unregister_netdev(netdev);
5276 5371
5372 if (pci_dev_run_wake(pdev)) {
5373 pm_runtime_disable(&pdev->dev);
5374 pm_runtime_set_suspended(&pdev->dev);
5375 }
5376 pm_runtime_put_noidle(&pdev->dev);
5377
5277 /* 5378 /*
5278 * Release control of h/w to f/w. If f/w is AMT enabled, this 5379 * Release control of h/w to f/w. If f/w is AMT enabled, this
5279 * would have already happened in close and is redundant. 5380 * would have already happened in close and is redundant.
@@ -5373,16 +5474,22 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
5373}; 5474};
5374MODULE_DEVICE_TABLE(pci, e1000_pci_tbl); 5475MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
5375 5476
5477#ifdef CONFIG_PM_OPS
5478static const struct dev_pm_ops e1000_pm_ops = {
5479 SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
5480 SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
5481 e1000_runtime_resume, e1000_idle)
5482};
5483#endif
5484
5376/* PCI Device API Driver */ 5485/* PCI Device API Driver */
5377static struct pci_driver e1000_driver = { 5486static struct pci_driver e1000_driver = {
5378 .name = e1000e_driver_name, 5487 .name = e1000e_driver_name,
5379 .id_table = e1000_pci_tbl, 5488 .id_table = e1000_pci_tbl,
5380 .probe = e1000_probe, 5489 .probe = e1000_probe,
5381 .remove = __devexit_p(e1000_remove), 5490 .remove = __devexit_p(e1000_remove),
5382#ifdef CONFIG_PM 5491#ifdef CONFIG_PM_OPS
5383 /* Power Management Hooks */ 5492 .driver.pm = &e1000_pm_ops,
5384 .suspend = e1000_suspend,
5385 .resume = e1000_resume,
5386#endif 5493#endif
5387 .shutdown = e1000_shutdown, 5494 .shutdown = e1000_shutdown,
5388 .err_handler = &e1000_err_handler 5495 .err_handler = &e1000_err_handler
@@ -5397,10 +5504,9 @@ static struct pci_driver e1000_driver = {
5397static int __init e1000_init_module(void) 5504static int __init e1000_init_module(void)
5398{ 5505{
5399 int ret; 5506 int ret;
5400 printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n", 5507 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
5401 e1000e_driver_name, e1000e_driver_version); 5508 e1000e_driver_version);
5402 printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n", 5509 pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
5403 e1000e_driver_name);
5404 ret = pci_register_driver(&e1000_driver); 5510 ret = pci_register_driver(&e1000_driver);
5405 5511
5406 return ret; 5512 return ret;
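
[editor's note] The netdev.c changes graft runtime PM onto the driver: open/close bracket hardware access with pm_runtime_get_sync()/pm_runtime_put(), the watchdog schedules a suspend once the link has been down for LINK_TIMEOUT, and the legacy pci_driver .suspend/.resume hooks give way to a dev_pm_ops table. A compile-oriented sketch of that wiring with placeholder callbacks:

	#include <linux/pm.h>
	#include <linux/pm_runtime.h>
	#include <linux/time.h>

	static int demo_suspend(struct device *dev)    { return 0; }
	static int demo_resume(struct device *dev)     { return 0; }
	static int demo_rt_suspend(struct device *dev) { return 0; }
	static int demo_rt_resume(struct device *dev)  { return 0; }

	static int demo_idle(struct device *dev)
	{
		/* mirror e1000_idle(): queue a delayed suspend and report
		 * busy so the PM core does not suspend immediately */
		pm_schedule_suspend(dev, MSEC_PER_SEC);
		return -EBUSY;
	}

	static const struct dev_pm_ops demo_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
		SET_RUNTIME_PM_OPS(demo_rt_suspend, demo_rt_resume, demo_idle)
	};

	/* hooked up as  .driver.pm = &demo_pm_ops  in the pci_driver */

The SET_* macros expand to nothing when the corresponding CONFIG_PM option is off, which is why the table itself still sits under #ifdef CONFIG_PM_OPS in the patch.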
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 2e399778cae5..f775a481063d 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -248,7 +248,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
248 } 248 }
249 249
250 { /* Transmit Interrupt Delay */ 250 { /* Transmit Interrupt Delay */
251 const struct e1000_option opt = { 251 static const struct e1000_option opt = {
252 .type = range_option, 252 .type = range_option,
253 .name = "Transmit Interrupt Delay", 253 .name = "Transmit Interrupt Delay",
254 .err = "using default of " 254 .err = "using default of "
@@ -267,7 +267,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
267 } 267 }
268 } 268 }
269 { /* Transmit Absolute Interrupt Delay */ 269 { /* Transmit Absolute Interrupt Delay */
270 const struct e1000_option opt = { 270 static const struct e1000_option opt = {
271 .type = range_option, 271 .type = range_option,
272 .name = "Transmit Absolute Interrupt Delay", 272 .name = "Transmit Absolute Interrupt Delay",
273 .err = "using default of " 273 .err = "using default of "
@@ -305,7 +305,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
305 } 305 }
306 } 306 }
307 { /* Receive Absolute Interrupt Delay */ 307 { /* Receive Absolute Interrupt Delay */
308 const struct e1000_option opt = { 308 static const struct e1000_option opt = {
309 .type = range_option, 309 .type = range_option,
310 .name = "Receive Absolute Interrupt Delay", 310 .name = "Receive Absolute Interrupt Delay",
311 .err = "using default of " 311 .err = "using default of "
@@ -324,7 +324,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
324 } 324 }
325 } 325 }
326 { /* Interrupt Throttling Rate */ 326 { /* Interrupt Throttling Rate */
327 const struct e1000_option opt = { 327 static const struct e1000_option opt = {
328 .type = range_option, 328 .type = range_option,
329 .name = "Interrupt Throttling Rate (ints/sec)", 329 .name = "Interrupt Throttling Rate (ints/sec)",
330 .err = "using default of " 330 .err = "using default of "
@@ -399,7 +399,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
399 } 399 }
400 } 400 }
401 { /* Smart Power Down */ 401 { /* Smart Power Down */
402 const struct e1000_option opt = { 402 static const struct e1000_option opt = {
403 .type = enable_option, 403 .type = enable_option,
404 .name = "PHY Smart Power Down", 404 .name = "PHY Smart Power Down",
405 .err = "defaulting to Disabled", 405 .err = "defaulting to Disabled",
@@ -415,7 +415,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
415 } 415 }
416 } 416 }
417 { /* CRC Stripping */ 417 { /* CRC Stripping */
418 const struct e1000_option opt = { 418 static const struct e1000_option opt = {
419 .type = enable_option, 419 .type = enable_option,
420 .name = "CRC Stripping", 420 .name = "CRC Stripping",
421 .err = "defaulting to enabled", 421 .err = "defaulting to enabled",
@@ -432,7 +432,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
432 } 432 }
433 } 433 }
434 { /* Kumeran Lock Loss Workaround */ 434 { /* Kumeran Lock Loss Workaround */
435 const struct e1000_option opt = { 435 static const struct e1000_option opt = {
436 .type = enable_option, 436 .type = enable_option,
437 .name = "Kumeran Lock Loss Workaround", 437 .name = "Kumeran Lock Loss Workaround",
438 .err = "defaulting to Enabled", 438 .err = "defaulting to Enabled",
@@ -452,7 +452,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
452 } 452 }
453 } 453 }
454 { /* Write-protect NVM */ 454 { /* Write-protect NVM */
455 const struct e1000_option opt = { 455 static const struct e1000_option opt = {
456 .type = enable_option, 456 .type = enable_option,
457 .name = "Write-protect NVM", 457 .name = "Write-protect NVM",
458 .err = "defaulting to Enabled", 458 .err = "defaulting to Enabled",
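
[editor's note] The param.c change is small but worthwhile: with plain const the option template is rebuilt on the stack each time the enclosing block runs, whereas static const gives one shared, read-only copy in .rodata. An illustrative reduction (struct and values invented):

	struct demo_option {
		int min, max;
		const char *name;
	};

	static int check_range(int val)
	{
		/* one read-only instance for all calls, no per-call stack copy */
		static const struct demo_option opt = {
			.min  = 0,
			.max  = 70000,
			.name = "Transmit Interrupt Delay",
		};

		return val >= opt.min && val <= opt.max;
	}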
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 27c7bdbfa003..eed65d821e47 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -645,7 +645,7 @@ static void __init printEEPROMInfo(struct net_device *dev)
645 if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE "); 645 if (GetBit(Word,ee_PortTPE)) printk(KERN_DEBUG "TPE ");
646 if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC "); 646 if (GetBit(Word,ee_PortBNC)) printk(KERN_DEBUG "BNC ");
647 if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI "); 647 if (GetBit(Word,ee_PortAUI)) printk(KERN_DEBUG "AUI ");
648 printk(KERN_DEBUG "port(s) \n"); 648 printk(KERN_DEBUG "port(s)\n");
649 649
650 Word = lp->word[6]; 650 Word = lp->word[6];
651 printk(KERN_DEBUG "Word6:\n"); 651 printk(KERN_DEBUG "Word6:\n");
@@ -765,7 +765,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
765 /* Grab the region so we can find another board if autoIRQ fails. */ 765 /* Grab the region so we can find another board if autoIRQ fails. */
766 if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) { 766 if (!request_region(ioaddr, EEPRO_IO_EXTENT, DRV_NAME)) {
767 if (!autoprobe) 767 if (!autoprobe)
768 printk(KERN_WARNING "EEPRO: io-port 0x%04x in use \n", 768 printk(KERN_WARNING "EEPRO: io-port 0x%04x in use\n",
769 ioaddr); 769 ioaddr);
770 return -EBUSY; 770 return -EBUSY;
771 } 771 }
@@ -1286,7 +1286,7 @@ set_multicast_list(struct net_device *dev)
1286 struct eepro_local *lp = netdev_priv(dev); 1286 struct eepro_local *lp = netdev_priv(dev);
1287 short ioaddr = dev->base_addr; 1287 short ioaddr = dev->base_addr;
1288 unsigned short mode; 1288 unsigned short mode;
1289 struct dev_mc_list *dmi; 1289 struct netdev_hw_addr *ha;
1290 int mc_count = netdev_mc_count(dev); 1290 int mc_count = netdev_mc_count(dev);
1291 1291
1292 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63) 1292 if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
@@ -1331,8 +1331,8 @@ set_multicast_list(struct net_device *dev)
1331 outw(0, ioaddr + IO_PORT); 1331 outw(0, ioaddr + IO_PORT);
1332 outw(6 * (mc_count + 1), ioaddr + IO_PORT); 1332 outw(6 * (mc_count + 1), ioaddr + IO_PORT);
1333 1333
1334 netdev_for_each_mc_addr(dmi, dev) { 1334 netdev_for_each_mc_addr(ha, dev) {
1335 eaddrs = (unsigned short *) dmi->dmi_addr; 1335 eaddrs = (unsigned short *) ha->addr;
1336 outw(*eaddrs++, ioaddr + IO_PORT); 1336 outw(*eaddrs++, ioaddr + IO_PORT);
1337 outw(*eaddrs++, ioaddr + IO_PORT); 1337 outw(*eaddrs++, ioaddr + IO_PORT);
1338 outw(*eaddrs++, ioaddr + IO_PORT); 1338 outw(*eaddrs++, ioaddr + IO_PORT);
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 1a7322b80ea7..c31dd0685553 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1575,7 +1575,7 @@ static void eexp_hw_init586(struct net_device *dev)
1575 1575
1576static void eexp_setup_filter(struct net_device *dev) 1576static void eexp_setup_filter(struct net_device *dev)
1577{ 1577{
1578 struct dev_mc_list *dmi; 1578 struct netdev_hw_addr *ha;
1579 unsigned short ioaddr = dev->base_addr; 1579 unsigned short ioaddr = dev->base_addr;
1580 int count = netdev_mc_count(dev); 1580 int count = netdev_mc_count(dev);
1581 int i; 1581 int i;
@@ -1588,8 +1588,8 @@ static void eexp_setup_filter(struct net_device *dev)
1588 outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR); 1588 outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
1589 outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST)); 1589 outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
1590 i = 0; 1590 i = 0;
1591 netdev_for_each_mc_addr(dmi, dev) { 1591 netdev_for_each_mc_addr(ha, dev) {
1592 unsigned short *data = (unsigned short *) dmi->dmi_addr; 1592 unsigned short *data = (unsigned short *) ha->addr;
1593 1593
1594 if (i == count) 1594 if (i == count)
1595 break; 1595 break;
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 809ccc9ff09c..3f445efa9482 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1618,7 +1618,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 {
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
-	int skb_data_size = skb->len - skb->data_len;
+	int skb_data_size = skb_headlen(skb);
 	int headersize;
 
 	/* Packet is TCP with TSO enabled */
@@ -1629,7 +1629,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 	 */
 	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
 
-	skb_data_size = skb->len - skb->data_len;
+	skb_data_size = skb_headlen(skb);
 
 	if (skb_data_size >= headersize) {
 		/* copy immediate data */
@@ -1651,7 +1651,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 static void write_swqe2_nonTSO(struct sk_buff *skb,
 			struct ehea_swqe *swqe, u32 lkey)
 {
-	int skb_data_size = skb->len - skb->data_len;
+	int skb_data_size = skb_headlen(skb);
 	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
 	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
 
@@ -1967,7 +1967,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
 static void ehea_set_multicast_list(struct net_device *dev)
 {
 	struct ehea_port *port = netdev_priv(dev);
-	struct dev_mc_list *k_mcl_entry;
+	struct netdev_hw_addr *ha;
 	int ret;
 
 	if (dev->flags & IFF_PROMISC) {
@@ -1998,8 +1998,8 @@ static void ehea_set_multicast_list(struct net_device *dev)
 			goto out;
 		}
 
-		netdev_for_each_mc_addr(k_mcl_entry, dev)
-			ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
+		netdev_for_each_mc_addr(ha, dev)
+			ehea_add_multicast_entry(port, ha->addr);
 
 	}
 out:
@@ -2108,8 +2108,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
 	} else {
 		/* first copy data from the skb->data buffer ... */
 		skb_copy_from_linear_data(skb, imm_data,
-					  skb->len - skb->data_len);
-		imm_data += skb->len - skb->data_len;
+					  skb_headlen(skb));
+		imm_data += skb_headlen(skb);
 
 		/* ... then copy data from the fragments */
 		for (i = 0; i < nfrags; i++) {
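
The skb_headlen() substitutions in the ehea hunks above are purely cosmetic: the helper is defined in the skbuff API as exactly the expression it replaces, the length of the skb's linear head:

	/* From include/linux/skbuff.h: total length minus the bytes held in
	 * paged fragments, i.e. what sits in the linear skb->data area. */
	static inline unsigned int skb_headlen(const struct sk_buff *skb)
	{
		return skb->len - skb->data_len;
	}
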
diff --git a/drivers/net/enic/cq_enet_desc.h b/drivers/net/enic/cq_enet_desc.h
index 03dce9ed612c..337d1943af46 100644
--- a/drivers/net/enic/cq_enet_desc.h
+++ b/drivers/net/enic/cq_enet_desc.h
@@ -101,14 +101,18 @@ static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc,
 	u8 *tcp_udp_csum_ok, u8 *udp, u8 *tcp, u8 *ipv4_csum_ok,
 	u8 *ipv6, u8 *ipv4, u8 *ipv4_fragment, u8 *fcs_ok)
 {
-	u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags);
-	u16 q_number_rss_type_flags =
-		le16_to_cpu(desc->q_number_rss_type_flags);
-	u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+	u16 completed_index_flags;
+	u16 q_number_rss_type_flags;
+	u16 bytes_written_flags;
 
 	cq_desc_dec((struct cq_desc *)desc, type,
 		color, q_number, completed_index);
 
+	completed_index_flags = le16_to_cpu(desc->completed_index_flags);
+	q_number_rss_type_flags =
+		le16_to_cpu(desc->q_number_rss_type_flags);
+	bytes_written_flags = le16_to_cpu(desc->bytes_written_flags);
+
 	*ingress_port = (completed_index_flags &
 		CQ_ENET_RQ_DESC_FLAGS_INGRESS_PORT) ? 1 : 0;
 	*fcoe = (completed_index_flags & CQ_ENET_RQ_DESC_FLAGS_FCOE) ?
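
Deferring the le16_to_cpu() reads until after cq_desc_dec() looks deliberate rather than stylistic: in the enic vNIC code it is cq_desc_dec() that checks the descriptor's color bit and issues the read barrier, so the remaining flag words should only be sampled once that check has passed. Schematically (a sketch of the ordering assumption, not a verbatim excerpt):

	/* Validate-then-read: assumes cq_desc_dec() performs the color-bit
	 * check plus rmb() before returning, as in the enic vNIC code. */
	cq_desc_dec((struct cq_desc *)desc, type, color, q_number,
		completed_index);
	completed_index_flags = le16_to_cpu(desc->completed_index_flags);
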
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index ee01f5a6d0d4..5fa56f1e5590 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -33,8 +33,8 @@
 #include "vnic_rss.h"
 
 #define DRV_NAME		"enic"
-#define DRV_DESCRIPTION		"Cisco 10G Ethernet Driver"
-#define DRV_VERSION		"1.1.0.241a"
+#define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
+#define DRV_VERSION		"1.3.1.1"
 #define DRV_COPYRIGHT		"Copyright 2008-2009 Cisco Systems, Inc"
 #define PFX			DRV_NAME ": "
 
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index cf098bb636b8..1232887c243d 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -822,14 +822,14 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
 static void enic_set_multicast_list(struct net_device *netdev)
 {
 	struct enic *enic = netdev_priv(netdev);
-	struct dev_mc_list *list;
+	struct netdev_hw_addr *ha;
 	int directed = 1;
 	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
 	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
 	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
 	unsigned int mc_count = netdev_mc_count(netdev);
 	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
 		mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
 	unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
 	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
 	unsigned int i, j;
@@ -852,10 +852,10 @@ static void enic_set_multicast_list(struct net_device *netdev)
 	 */
 
 	i = 0;
-	netdev_for_each_mc_addr(list, netdev) {
+	netdev_for_each_mc_addr(ha, netdev) {
 		if (i == mc_count)
 			break;
-		memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
+		memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
 	}
 
 	for (i = 0; i < enic->mc_count; i++) {
@@ -2058,8 +2058,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
 	netdev->watchdog_timeo = 2 * HZ;
 	netdev->ethtool_ops = &enic_ethtool_ops;
 
-	netdev->features |= NETIF_F_HW_VLAN_TX |
-		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
+	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
 	if (ENIC_SETTING(enic, TXCSUM))
 		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
 	if (ENIC_SETTING(enic, TSO))
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index cf22de71014e..d43a9d43bbff 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -574,22 +574,18 @@ int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr)
 	return err;
 }
 
-int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+	void *notify_addr, dma_addr_t notify_pa, u16 intr)
 {
 	u64 a0, a1;
 	int wait = 1000;
 	int r;
 
-	if (!vdev->notify) {
-		vdev->notify = pci_alloc_consistent(vdev->pdev,
-			sizeof(struct vnic_devcmd_notify),
-			&vdev->notify_pa);
-		if (!vdev->notify)
-			return -ENOMEM;
-		memset(vdev->notify, 0, sizeof(struct vnic_devcmd_notify));
-	}
+	memset(notify_addr, 0, sizeof(struct vnic_devcmd_notify));
+	vdev->notify = notify_addr;
+	vdev->notify_pa = notify_pa;
 
-	a0 = vdev->notify_pa;
+	a0 = (u64)notify_pa;
 	a1 = ((u64)intr << 32) & 0x0000ffff00000000ULL;
 	a1 += sizeof(struct vnic_devcmd_notify);
 
@@ -598,7 +594,27 @@ int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
 	return r;
 }
 
-void vnic_dev_notify_unset(struct vnic_dev *vdev)
+int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr)
+{
+	void *notify_addr;
+	dma_addr_t notify_pa;
+
+	if (vdev->notify || vdev->notify_pa) {
+		printk(KERN_ERR "notify block %p still allocated",
+			vdev->notify);
+		return -EINVAL;
+	}
+
+	notify_addr = pci_alloc_consistent(vdev->pdev,
+		sizeof(struct vnic_devcmd_notify),
+		&notify_pa);
+	if (!notify_addr)
+		return -ENOMEM;
+
+	return vnic_dev_notify_setcmd(vdev, notify_addr, notify_pa, intr);
+}
+
+void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev)
 {
 	u64 a0, a1;
 	int wait = 1000;
@@ -608,9 +624,23 @@ void vnic_dev_notify_unset(struct vnic_dev *vdev)
 	a1 += sizeof(struct vnic_devcmd_notify);
 
 	vnic_dev_cmd(vdev, CMD_NOTIFY, &a0, &a1, wait);
+	vdev->notify = NULL;
+	vdev->notify_pa = 0;
 	vdev->notify_sz = 0;
 }
 
+void vnic_dev_notify_unset(struct vnic_dev *vdev)
+{
+	if (vdev->notify) {
+		pci_free_consistent(vdev->pdev,
+			sizeof(struct vnic_devcmd_notify),
+			vdev->notify,
+			vdev->notify_pa);
+	}
+
+	vnic_dev_notify_unsetcmd(vdev);
+}
+
 static int vnic_dev_notify_ready(struct vnic_dev *vdev)
 {
 	u32 *words;
diff --git a/drivers/net/enic/vnic_dev.h b/drivers/net/enic/vnic_dev.h
index fc5e3eb35a5e..f5be640b0b5c 100644
--- a/drivers/net/enic/vnic_dev.h
+++ b/drivers/net/enic/vnic_dev.h
@@ -107,7 +107,10 @@ void vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
 void vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
 int vnic_dev_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
 int vnic_dev_raise_intr(struct vnic_dev *vdev, u16 intr);
+int vnic_dev_notify_setcmd(struct vnic_dev *vdev,
+	void *notify_addr, dma_addr_t notify_pa, u16 intr);
 int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
+void vnic_dev_notify_unsetcmd(struct vnic_dev *vdev);
 void vnic_dev_notify_unset(struct vnic_dev *vdev);
 int vnic_dev_link_status(struct vnic_dev *vdev);
 u32 vnic_dev_port_speed(struct vnic_dev *vdev);
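
The notify path is split so the devcmd itself (vnic_dev_notify_setcmd()/vnic_dev_notify_unsetcmd()) no longer implies ownership of the DMA buffer: vnic_dev_notify_set() keeps the old behavior of allocating with pci_alloc_consistent(), while a caller that already holds coherent memory can drive the command directly. A caller-side sketch (hypothetical function, assuming a pre-allocated coherent block of at least sizeof(struct vnic_devcmd_notify) bytes):

	/* Hypothetical: enable notifications using caller-owned coherent
	 * memory instead of letting vnic_dev_notify_set() allocate it. */
	static int example_enable_notify(struct vnic_dev *vdev, void *buf,
		dma_addr_t buf_pa, u16 intr)
	{
		return vnic_dev_notify_setcmd(vdev, buf, buf_pa, intr);
	}
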
diff --git a/drivers/net/enic/vnic_rq.c b/drivers/net/enic/vnic_rq.c
index e186efaf9da1..cc580cfec41d 100644
--- a/drivers/net/enic/vnic_rq.c
+++ b/drivers/net/enic/vnic_rq.c
@@ -168,10 +168,10 @@ int vnic_rq_disable(struct vnic_rq *rq)
 	iowrite32(0, &rq->ctrl->enable);
 
 	/* Wait for HW to ACK disable request */
-	for (wait = 0; wait < 100; wait++) {
+	for (wait = 0; wait < 1000; wait++) {
 		if (!(ioread32(&rq->ctrl->running)))
 			return 0;
-		udelay(1);
+		udelay(10);
 	}
 
 	printk(KERN_ERR "Failed to disable RQ[%d]\n", rq->index);
diff --git a/drivers/net/enic/vnic_wq.c b/drivers/net/enic/vnic_wq.c
index d5f984357f5c..1378afbdfe67 100644
--- a/drivers/net/enic/vnic_wq.c
+++ b/drivers/net/enic/vnic_wq.c
@@ -161,10 +161,10 @@ int vnic_wq_disable(struct vnic_wq *wq)
 	iowrite32(0, &wq->ctrl->enable);
 
 	/* Wait for HW to ACK disable request */
-	for (wait = 0; wait < 100; wait++) {
+	for (wait = 0; wait < 1000; wait++) {
 		if (!(ioread32(&wq->ctrl->running)))
 			return 0;
-		udelay(1);
+		udelay(10);
 	}
 
 	printk(KERN_ERR "Failed to disable WQ[%d]\n", wq->index);
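
The two identical loop changes (here and in vnic_rq.c above) stretch the disable poll from 100 iterations of udelay(1), a 100 us budget, to 1000 iterations of udelay(10): 1000 x 10 us = 10 ms, two orders of magnitude more time for the hardware to acknowledge before the "Failed to disable" error path fires.
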
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 7a567201e829..8b5a203d3aa2 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1399,12 +1399,12 @@ static void set_rx_mode(struct net_device *dev)
 		outl(0x0004, ioaddr + RxCtrl);
 		return;
 	} else {			/* Never executed, for now. */
-		struct dev_mc_list *mclist;
+		struct netdev_hw_addr *ha;
 
 		memset(mc_filter, 0, sizeof(mc_filter));
-		netdev_for_each_mc_addr(mclist, dev) {
+		netdev_for_each_mc_addr(ha, dev) {
 			unsigned int bit_nr =
-				ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
+				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
 			mc_filter[bit_nr >> 3] |= (1 << bit_nr);
 		}
 	}
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index a8d92503226e..6bd03c8b8886 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -756,7 +756,7 @@ static void ethoc_set_multicast_list(struct net_device *dev)
 {
 	struct ethoc *priv = netdev_priv(dev);
 	u32 mode = ethoc_read(priv, MODER);
-	struct dev_mc_list *mc;
+	struct netdev_hw_addr *ha;
 	u32 hash[2] = { 0, 0 };
 
 	/* set loopback mode if requested */
@@ -784,8 +784,8 @@ static void ethoc_set_multicast_list(struct net_device *dev)
 		hash[0] = 0xffffffff;
 		hash[1] = 0xffffffff;
 	} else {
-		netdev_for_each_mc_addr(mc, dev) {
-			u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr);
+		netdev_for_each_mc_addr(ha, dev) {
+			u32 crc = ether_crc(ETH_ALEN, ha->addr);
 			int bit = (crc >> 26) & 0x3f;
 			hash[bit >> 5] |= 1 << (bit & 0x1f);
 		}
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index 91e59f3a9d6d..11ba70f49971 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -1169,7 +1169,7 @@ static void set_multicast_list(struct net_device *dev)
 static void SetMulticastFilter(struct net_device *dev)
 {
 	struct ewrk3_private *lp = netdev_priv(dev);
-	struct dev_mc_list *dmi;
+	struct netdev_hw_addr *ha;
 	u_long iobase = dev->base_addr;
 	int i;
 	char *addrs, bit, byte;
@@ -1213,8 +1213,8 @@ static void SetMulticastFilter(struct net_device *dev)
 	}
 
 	/* Update table */
-	netdev_for_each_mc_addr(dmi, dev) {
-		addrs = dmi->dmi_addr;
+	netdev_for_each_mc_addr(ha, dev) {
+		addrs = ha->addr;
 		if ((*addrs & 0x01) == 1) {	/* multicast address? */
 			crc = ether_crc_le(ETH_ALEN, addrs);
 			hashcode = crc & ((1 << 9) - 1);	/* hashcode is 9 LSb of CRC */
@@ -1776,8 +1776,7 @@ static int ewrk3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 		break;
 	case EWRK3_SET_MCA:	/* Set a multicast address */
 		if (capable(CAP_NET_ADMIN)) {
-			if (ioc->len > 1024)
-			{
+			if (ioc->len > HASH_TABLE_LEN) {
 				status = -EINVAL;
 				break;
 			}
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index d11ae5197f01..51b738dd6547 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1791,12 +1791,12 @@ static void __set_rx_mode(struct net_device *dev)
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		rx_mode = CR_W_AB | CR_W_AM;
 	} else {
-		struct dev_mc_list *mclist;
+		struct netdev_hw_addr *ha;
 
 		memset(mc_filter, 0, sizeof(mc_filter));
-		netdev_for_each_mc_addr(mclist, dev) {
+		netdev_for_each_mc_addr(ha, dev) {
 			unsigned int bit;
-			bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
+			bit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
 			mc_filter[bit >> 5] |= (1 << bit);
 		}
 		rx_mode = CR_W_AB | CR_W_AM;
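
The fec.c diff below removes that driver's home-grown MII request queue and per-PHY command tables and moves it onto the kernel's phylib framework: the driver now only supplies MDIO read/write/reset ops on a registered mii_bus plus an adjust_link callback. Reduced to its moving parts, the contract looks roughly like this (illustrative names only; the real code follows in the diff):

	/* Sketch of the phylib attach sequence the new fec.c implements. */
	static void example_adjust_link(struct net_device *dev)
	{
		/* phylib invokes this on link/speed/duplex changes; the MAC
		 * reprograms itself from phydev->link and phydev->duplex. */
	}

	static int example_mii_probe(struct net_device *dev, struct mii_bus *bus)
	{
		struct phy_device *phy = NULL;
		int addr;

		for (addr = 0; addr < PHY_MAX_ADDR; addr++)
			if (bus->phy_map[addr]) {
				phy = bus->phy_map[addr];
				break;
			}
		if (!phy)
			return -ENODEV;

		phy = phy_connect(dev, dev_name(&phy->dev),
				&example_adjust_link, 0, PHY_INTERFACE_MODE_MII);
		if (IS_ERR(phy))
			return PTR_ERR(phy);

		phy->supported &= PHY_BASIC_FEATURES;	/* mask to MAC abilities */
		phy->advertising = phy->supported;
		return 0;
	}
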
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 9f98c1c4a344..2b1651aee13f 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -40,6 +40,7 @@
 #include <linux/irq.h>
 #include <linux/clk.h>
 #include <linux/platform_device.h>
+#include <linux/phy.h>
 
 #include <asm/cacheflush.h>
 
@@ -61,7 +62,6 @@
  * Define the fixed address of the FEC hardware.
  */
 #if defined(CONFIG_M5272)
-#define HAVE_mii_link_interrupt
 
 static unsigned char	fec_mac_default[] = {
 	0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -86,23 +86,6 @@ static unsigned char fec_mac_default[] = {
 #endif
 #endif /* CONFIG_M5272 */
 
-/* Forward declarations of some structures to support different PHYs */
-
-typedef struct {
-	uint mii_data;
-	void (*funct)(uint mii_reg, struct net_device *dev);
-} phy_cmd_t;
-
-typedef struct {
-	uint id;
-	char *name;
-
-	const phy_cmd_t *config;
-	const phy_cmd_t *startup;
-	const phy_cmd_t *ack_int;
-	const phy_cmd_t *shutdown;
-} phy_info_t;
-
 /* The number of Tx and Rx buffers.  These are allocated from the page
  * pool.  The code may assume these are power of two, so it it best
  * to keep them that size.
@@ -189,29 +172,21 @@ struct fec_enet_private {
 	uint	tx_full;
 	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
 	spinlock_t hw_lock;
-	/* hold while accessing the mii_list_t() elements */
-	spinlock_t mii_lock;
-
-	uint	phy_id;
-	uint	phy_id_done;
-	uint	phy_status;
-	uint	phy_speed;
-	phy_info_t const	*phy;
-	struct work_struct phy_task;
 
-	uint	sequence_done;
-	uint	mii_phy_task_queued;
+	struct	platform_device *pdev;
 
-	uint	phy_addr;
+	int	opened;
 
+	/* Phylib and MDIO interface */
+	struct	mii_bus *mii_bus;
+	struct	phy_device *phy_dev;
+	int	mii_timeout;
+	uint	phy_speed;
 	int	index;
-	int	opened;
 	int	link;
-	int	old_link;
 	int	full_duplex;
 };
 
-static void fec_enet_mii(struct net_device *dev);
 static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
 static void fec_enet_tx(struct net_device *dev);
 static void fec_enet_rx(struct net_device *dev);
@@ -219,67 +194,20 @@ static int fec_enet_close(struct net_device *dev);
 static void fec_restart(struct net_device *dev, int duplex);
 static void fec_stop(struct net_device *dev);
 
+/* FEC MII MMFR bits definition */
+#define FEC_MMFR_ST		(1 << 30)
+#define FEC_MMFR_OP_READ	(2 << 28)
+#define FEC_MMFR_OP_WRITE	(1 << 28)
+#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
+#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
+#define FEC_MMFR_TA		(2 << 16)
+#define FEC_MMFR_DATA(v)	(v & 0xffff)
 
-/* MII processing.  We keep this as simple as possible.  Requests are
- * placed on the list (if there is room).  When the request is finished
- * by the MII, an optional function may be called.
- */
-typedef struct mii_list {
-	uint	mii_regval;
-	void	(*mii_func)(uint val, struct net_device *dev);
-	struct	mii_list *mii_next;
-} mii_list_t;
-
-#define		NMII	20
-static mii_list_t	mii_cmds[NMII];
-static mii_list_t	*mii_free;
-static mii_list_t	*mii_head;
-static mii_list_t	*mii_tail;
-
-static int	mii_queue(struct net_device *dev, int request,
-			void (*func)(uint, struct net_device *));
-
-/* Make MII read/write commands for the FEC */
-#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
-#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
-						(VAL & 0xffff))
-#define mk_mii_end	0
+#define FEC_MII_TIMEOUT		10000
 
 /* Transmitter timeout */
 #define TX_TIMEOUT (2 * HZ)
 
-/* Register definitions for the PHY */
-
-#define MII_REG_CR	0  /* Control Register */
-#define MII_REG_SR	1  /* Status Register */
-#define MII_REG_PHYIR1	2  /* PHY Identification Register 1 */
-#define MII_REG_PHYIR2	3  /* PHY Identification Register 2 */
-#define MII_REG_ANAR	4  /* A-N Advertisement Register */
-#define MII_REG_ANLPAR	5  /* A-N Link Partner Ability Register */
-#define MII_REG_ANER	6  /* A-N Expansion Register */
-#define MII_REG_ANNPTR	7  /* A-N Next Page Transmit Register */
-#define MII_REG_ANLPRNPR	8  /* A-N Link Partner Received Next Page Reg. */
-
-/* values for phy_status */
-
-#define PHY_CONF_ANE	0x0001  /* 1 auto-negotiation enabled */
-#define PHY_CONF_LOOP	0x0002  /* 1 loopback mode enabled */
-#define PHY_CONF_SPMASK	0x00f0  /* mask for speed */
-#define PHY_CONF_10HDX	0x0010  /* 10 Mbit half duplex supported */
-#define PHY_CONF_10FDX	0x0020  /* 10 Mbit full duplex supported */
-#define PHY_CONF_100HDX	0x0040  /* 100 Mbit half duplex supported */
-#define PHY_CONF_100FDX	0x0080  /* 100 Mbit full duplex supported */
-
-#define PHY_STAT_LINK	0x0100  /* 1 up - 0 down */
-#define PHY_STAT_FAULT	0x0200  /* 1 remote fault */
-#define PHY_STAT_ANC	0x0400  /* 1 auto-negotiation complete */
-#define PHY_STAT_SPMASK	0xf000  /* mask for speed */
-#define PHY_STAT_10HDX	0x1000  /* 10 Mbit half duplex selected */
-#define PHY_STAT_10FDX	0x2000  /* 10 Mbit full duplex selected */
-#define PHY_STAT_100HDX	0x4000  /* 100 Mbit half duplex selected */
-#define PHY_STAT_100FDX	0x8000  /* 100 Mbit full duplex selected */
-
-
 static int
 fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -406,12 +334,6 @@ fec_enet_interrupt(int irq, void * dev_id)
 			ret = IRQ_HANDLED;
 			fec_enet_tx(dev);
 		}
-
-		if (int_events & FEC_ENET_MII) {
-			ret = IRQ_HANDLED;
-			fec_enet_mii(dev);
-		}
-
 	} while (int_events);
 
 	return ret;
@@ -607,827 +529,311 @@ rx_processing_done:
 	spin_unlock(&fep->hw_lock);
 }
 
-/* called from interrupt context */
-static void
-fec_enet_mii(struct net_device *dev)
-{
-	struct fec_enet_private *fep;
-	mii_list_t *mip;
-
-	fep = netdev_priv(dev);
-	spin_lock(&fep->mii_lock);
-
-	if ((mip = mii_head) == NULL) {
-		printk("MII and no head!\n");
-		goto unlock;
-	}
-
-	if (mip->mii_func != NULL)
-		(*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);
-
-	mii_head = mip->mii_next;
-	mip->mii_next = mii_free;
-	mii_free = mip;
-
-	if ((mip = mii_head) != NULL)
-		writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);
-
-unlock:
-	spin_unlock(&fep->mii_lock);
-}
-
-static int
-mii_queue_unlocked(struct net_device *dev, int regval,
-		void (*func)(uint, struct net_device *))
+/* ------------------------------------------------------------------------- */
+#ifdef CONFIG_M5272
+static void __inline__ fec_get_mac(struct net_device *dev)
 {
-	struct fec_enet_private *fep;
-	mii_list_t	*mip;
-	int		retval;
-
-	/* Add PHY address to register command */
-	fep = netdev_priv(dev);
+	struct fec_enet_private *fep = netdev_priv(dev);
+	unsigned char *iap, tmpaddr[ETH_ALEN];
 
-	regval |= fep->phy_addr << 23;
-	retval = 0;
-
-	if ((mip = mii_free) != NULL) {
-		mii_free = mip->mii_next;
-		mip->mii_regval = regval;
-		mip->mii_func = func;
-		mip->mii_next = NULL;
-		if (mii_head) {
-			mii_tail->mii_next = mip;
-			mii_tail = mip;
-		} else {
-			mii_head = mii_tail = mip;
-			writel(regval, fep->hwp + FEC_MII_DATA);
-		}
-	} else {
-		retval = 1;
+	if (FEC_FLASHMAC) {
+		/*
+		 * Get MAC address from FLASH.
+		 * If it is all 1's or 0's, use the default.
+		 */
+		iap = (unsigned char *)FEC_FLASHMAC;
+		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
+		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
+			iap = fec_mac_default;
+		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
+		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
+			iap = fec_mac_default;
 	} else {
-		retval = 1;
+		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
+		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+		iap = &tmpaddr[0];
 	}
 
-	return retval;
-}
-
-static int
-mii_queue(struct net_device *dev, int regval,
-		void (*func)(uint, struct net_device *))
-{
-	struct fec_enet_private *fep;
-	unsigned long	flags;
-	int		retval;
-	fep = netdev_priv(dev);
-	spin_lock_irqsave(&fep->mii_lock, flags);
-	retval = mii_queue_unlocked(dev, regval, func);
-	spin_unlock_irqrestore(&fep->mii_lock, flags);
-	return retval;
-}
-
-static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
-{
-	if(!c)
-		return;
+	memcpy(dev->dev_addr, iap, ETH_ALEN);
 
-	for (; c->mii_data != mk_mii_end; c++)
-		mii_queue(dev, c->mii_data, c->funct);
+	/* Adjust MAC if using default MAC address */
+	if (iap == fec_mac_default)
+		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
 }
+#endif
 
-static void mii_parse_sr(uint mii_reg, struct net_device *dev)
-{
-	struct fec_enet_private *fep = netdev_priv(dev);
-	volatile uint *s = &(fep->phy_status);
-	uint status;
-
-	status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
-
-	if (mii_reg & 0x0004)
-		status |= PHY_STAT_LINK;
-	if (mii_reg & 0x0010)
-		status |= PHY_STAT_FAULT;
-	if (mii_reg & 0x0020)
-		status |= PHY_STAT_ANC;
-	*s = status;
-}
+/* ------------------------------------------------------------------------- */
 
-static void mii_parse_cr(uint mii_reg, struct net_device *dev)
+/*
+ * Phy section
+ */
+static void fec_enet_adjust_link(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
-	volatile uint *s = &(fep->phy_status);
-	uint status;
-
-	status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);
-
-	if (mii_reg & 0x1000)
-		status |= PHY_CONF_ANE;
-	if (mii_reg & 0x4000)
-		status |= PHY_CONF_LOOP;
-	*s = status;
-}
+	struct phy_device *phy_dev = fep->phy_dev;
+	unsigned long flags;
 
-static void mii_parse_anar(uint mii_reg, struct net_device *dev)
-{
-	struct fec_enet_private *fep = netdev_priv(dev);
-	volatile uint *s = &(fep->phy_status);
-	uint status;
-
-	status = *s & ~(PHY_CONF_SPMASK);
-
-	if (mii_reg & 0x0020)
-		status |= PHY_CONF_10HDX;
-	if (mii_reg & 0x0040)
-		status |= PHY_CONF_10FDX;
-	if (mii_reg & 0x0080)
-		status |= PHY_CONF_100HDX;
-	if (mii_reg & 0x00100)
-		status |= PHY_CONF_100FDX;
-	*s = status;
-}
+	int status_change = 0;
 
-/* ------------------------------------------------------------------------- */
-/* The Level one LXT970 is used by many boards				     */
+	spin_lock_irqsave(&fep->hw_lock, flags);
 
-#define MII_LXT970_MIRROR    16  /* Mirror register           */
-#define MII_LXT970_IER       17  /* Interrupt Enable Register */
-#define MII_LXT970_ISR       18  /* Interrupt Status Register */
-#define MII_LXT970_CONFIG    19  /* Configuration Register    */
-#define MII_LXT970_CSR       20  /* Chip Status Register      */
+	/* Prevent a state halted on mii error */
+	if (fep->mii_timeout && phy_dev->state == PHY_HALTED) {
+		phy_dev->state = PHY_RESUMING;
+		goto spin_unlock;
+	}
 
-static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
-{
-	struct fec_enet_private *fep = netdev_priv(dev);
-	volatile uint *s = &(fep->phy_status);
-	uint status;
+	/* Duplex link change */
+	if (phy_dev->link) {
+		if (fep->full_duplex != phy_dev->duplex) {
+			fec_restart(dev, phy_dev->duplex);
+			status_change = 1;
+		}
+	}
 
-	status = *s & ~(PHY_STAT_SPMASK);
-	if (mii_reg & 0x0800) {
-		if (mii_reg & 0x1000)
-			status |= PHY_STAT_100FDX;
+	/* Link on or off change */
+	if (phy_dev->link != fep->link) {
+		fep->link = phy_dev->link;
+		if (phy_dev->link)
+			fec_restart(dev, phy_dev->duplex);
 		else
-			status |= PHY_STAT_100HDX;
-	} else {
-		if (mii_reg & 0x1000)
-			status |= PHY_STAT_10FDX;
-		else
-			status |= PHY_STAT_10HDX;
+			fec_stop(dev);
+		status_change = 1;
 	}
-	*s = status;
-}
-
-static phy_cmd_t const phy_cmd_lxt970_config[] = {
-		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
-		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
-		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
-		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
-		/* read SR and ISR to acknowledge */
-		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
-		{ mk_mii_read(MII_LXT970_ISR), NULL },
-
-		/* find out the current status */
-		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
-		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
-		{ mk_mii_end, }
-	};
-static phy_info_t const phy_info_lxt970 = {
-	.id = 0x07810000,
-	.name = "LXT970",
-	.config = phy_cmd_lxt970_config,
-	.startup = phy_cmd_lxt970_startup,
-	.ack_int = phy_cmd_lxt970_ack_int,
-	.shutdown = phy_cmd_lxt970_shutdown
-};
 
-/* ------------------------------------------------------------------------- */
-/* The Level one LXT971 is used on some of my custom boards                  */
-
-/* register definitions for the 971 */
-
-#define MII_LXT971_PCR       16  /* Port Control Register     */
-#define MII_LXT971_SR2       17  /* Status Register 2         */
-#define MII_LXT971_IER       18  /* Interrupt Enable Register */
-#define MII_LXT971_ISR       19  /* Interrupt Status Register */
-#define MII_LXT971_LCR       20  /* LED Control Register      */
-#define MII_LXT971_TCR       30  /* Transmit Control Register */
+spin_unlock:
+	spin_unlock_irqrestore(&fep->hw_lock, flags);
+
+	if (status_change)
+		phy_print_status(phy_dev);
+}
 
 /*
- * I had some nice ideas of running the MDIO faster...
- * The 971 should support 8MHz and I tried it, but things acted really
- * weird, so 2.5 MHz ought to be enough for anyone...
+ * NOTE: a MII transaction is during around 25 us, so polling it...
  */
-
-static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
-	volatile uint *s = &(fep->phy_status);
-	uint status;
+	struct fec_enet_private *fep = bus->priv;
+	int timeout = FEC_MII_TIMEOUT;
 
-	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
+	fep->mii_timeout = 0;
 
-	if (mii_reg & 0x0400) {
-		fep->link = 1;
-		status |= PHY_STAT_LINK;
-	} else {
-		fep->link = 0;
-	}
-	if (mii_reg & 0x0080)
-		status |= PHY_STAT_ANC;
-	if (mii_reg & 0x4000) {
-		if (mii_reg & 0x0200)
-			status |= PHY_STAT_100FDX;
-		else
-			status |= PHY_STAT_100HDX;
-	} else {
-		if (mii_reg & 0x0200)
-			status |= PHY_STAT_10FDX;
-		else
-			status |= PHY_STAT_10HDX;
+	/* clear MII end of transfer bit*/
+	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+	/* start a read op */
+	writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+
+	/* wait for end of transfer */
+	while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
+		cpu_relax();
+		if (timeout-- < 0) {
+			fep->mii_timeout = 1;
+			printk(KERN_ERR "FEC: MDIO read timeout\n");
+			return -ETIMEDOUT;
+		}
 	}
-	if (mii_reg & 0x0008)
-		status |= PHY_STAT_FAULT;
 
-	*s = status;
+	/* return value */
+	return FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
 }
 
-static phy_cmd_t const phy_cmd_lxt971_config[] = {
-		/* limit to 10MBit because my prototype board
-		 * doesn't work with 100. */
-		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
-		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
-		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_lxt971_startup[] = {  /* enable interrupts */
-		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
-		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
-		{ mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
-		/* Somehow does the 971 tell me that the link is down
-		 * the first read after power-up.
-		 * read here to get a valid value in ack_int */
-		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
-		/* acknowledge the int before reading status ! */
-		{ mk_mii_read(MII_LXT971_ISR), NULL },
-		/* find out the current status */
-		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
-		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
-		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
-		{ mk_mii_end, }
-	};
-static phy_info_t const phy_info_lxt971 = {
-	.id = 0x0001378e,
-	.name = "LXT971",
-	.config = phy_cmd_lxt971_config,
-	.startup = phy_cmd_lxt971_startup,
-	.ack_int = phy_cmd_lxt971_ack_int,
-	.shutdown = phy_cmd_lxt971_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* The Quality Semiconductor QS6612 is used on the RPX CLLF                   */
-
-/* register definitions */
-
-#define MII_QS6612_MCR       17  /* Mode Control Register      */
-#define MII_QS6612_FTR       27  /* Factory Test Register      */
-#define MII_QS6612_MCO       28  /* Misc. Control Register     */
-#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
-#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
-#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */
-
-static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+			   u16 value)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
-	volatile uint *s = &(fep->phy_status);
-	uint status;
+	struct fec_enet_private *fep = bus->priv;
+	int timeout = FEC_MII_TIMEOUT;
 
-	status = *s & ~(PHY_STAT_SPMASK);
+	fep->mii_timeout = 0;
 
-	switch((mii_reg >> 2) & 7) {
-	case 1: status |= PHY_STAT_10HDX; break;
-	case 2: status |= PHY_STAT_100HDX; break;
-	case 5: status |= PHY_STAT_10FDX; break;
-	case 6: status |= PHY_STAT_100FDX; break;
-	}
-
-	*s = status;
-}
-
-static phy_cmd_t const phy_cmd_qs6612_config[] = {
-		/* The PHY powers up isolated on the RPX,
-		 * so send a command to allow operation.
-		 */
-		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
-
-		/* parse cr and anar to get some info */
-		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
-		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_qs6612_startup[] = {  /* enable interrupts */
-		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
-		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
-		/* we need to read ISR, SR and ANER to acknowledge */
-		{ mk_mii_read(MII_QS6612_ISR), NULL },
-		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
-		{ mk_mii_read(MII_REG_ANER), NULL },
-
-		/* read pcr to get info */
-		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
-		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
-		{ mk_mii_end, }
-	};
-static phy_info_t const phy_info_qs6612 = {
-	.id = 0x00181440,
-	.name = "QS6612",
-	.config = phy_cmd_qs6612_config,
-	.startup = phy_cmd_qs6612_startup,
-	.ack_int = phy_cmd_qs6612_ack_int,
-	.shutdown = phy_cmd_qs6612_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* AMD AM79C874 phy                                                          */
+	/* clear MII end of transfer bit*/
+	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
 
-/* register definitions for the 874 */
+	/* start a write op */
+	writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
+		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+		FEC_MMFR_TA | FEC_MMFR_DATA(value),
+		fep->hwp + FEC_MII_DATA);
+
+	/* wait for end of transfer */
+	while (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_MII)) {
+		cpu_relax();
+		if (timeout-- < 0) {
+			fep->mii_timeout = 1;
+			printk(KERN_ERR "FEC: MDIO write timeout\n");
+			return -ETIMEDOUT;
+		}
+	}
 
-#define MII_AM79C874_MFR       16  /* Miscellaneous Feature Register */
-#define MII_AM79C874_ICSR      17  /* Interrupt/Status Register      */
-#define MII_AM79C874_DR        18  /* Diagnostic Register            */
-#define MII_AM79C874_PMLR      19  /* Power and Loopback Register    */
-#define MII_AM79C874_MCR       21  /* ModeControl Register           */
-#define MII_AM79C874_DC        23  /* Disconnect Counter             */
-#define MII_AM79C874_REC       24  /* Recieve Error Counter          */
+	return 0;
+}
 
-static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
+static int fec_enet_mdio_reset(struct mii_bus *bus)
 {
-	struct fec_enet_private *fep = netdev_priv(dev);
-	volatile uint *s = &(fep->phy_status);
-	uint status;
-
-	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);
-
-	if (mii_reg & 0x0080)
-		status |= PHY_STAT_ANC;
-	if (mii_reg & 0x0400)
-		status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
-	else
-		status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);
-
-	*s = status;
+	return 0;
 }
 
-static phy_cmd_t const phy_cmd_am79c874_config[] = {
-		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
-		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
-		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_am79c874_startup[] = {  /* enable interrupts */
-		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
-		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
-		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
-		/* find out the current status */
-		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
-		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
-		/* we only need to read ISR to acknowledge */
-		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
-		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
-		{ mk_mii_end, }
-	};
-static phy_info_t const phy_info_am79c874 = {
-	.id = 0x00022561,
-	.name = "AM79C874",
-	.config = phy_cmd_am79c874_config,
-	.startup = phy_cmd_am79c874_startup,
-	.ack_int = phy_cmd_am79c874_ack_int,
-	.shutdown = phy_cmd_am79c874_shutdown
-};
-
-
-/* ------------------------------------------------------------------------- */
-/* Kendin KS8721BL phy                                                       */
-
-/* register definitions for the 8721 */
-
-#define MII_KS8721BL_RXERCR      21
-#define MII_KS8721BL_ICSR        27
-#define MII_KS8721BL_PHYCR       31
-
-static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
-		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
-		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_ks8721bl_startup[] = {  /* enable interrupts */
-		{ mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
-		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
-		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
-		/* find out the current status */
-		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
-		/* we only need to read ISR to acknowledge */
-		{ mk_mii_read(MII_KS8721BL_ICSR), NULL },
-		{ mk_mii_end, }
-	};
-static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
-		{ mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
-		{ mk_mii_end, }
-	};
-static phy_info_t const phy_info_ks8721bl = {
-	.id = 0x00022161,
-	.name = "KS8721BL",
-	.config = phy_cmd_ks8721bl_config,
-	.startup = phy_cmd_ks8721bl_startup,
-	.ack_int = phy_cmd_ks8721bl_ack_int,
-	.shutdown = phy_cmd_ks8721bl_shutdown
-};
-
-/* ------------------------------------------------------------------------- */
-/* register definitions for the DP83848 */
-
-#define MII_DP8384X_PHYSTST    16  /* PHY Status Register */
-
-static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
+static int fec_enet_mii_probe(struct net_device *dev)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
-	volatile uint *s = &(fep->phy_status);
-
-	*s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
-
-	/* Link up */
-	if (mii_reg & 0x0001) {
-		fep->link = 1;
-		*s |= PHY_STAT_LINK;
-	} else
-		fep->link = 0;
-	/* Status of link */
-	if (mii_reg & 0x0010) /* Autonegotioation complete */
-		*s |= PHY_STAT_ANC;
-	if (mii_reg & 0x0002) {   /* 10MBps? */
-		if (mii_reg & 0x0004)   /* Full Duplex? */
-			*s |= PHY_STAT_10FDX;
-		else
-			*s |= PHY_STAT_10HDX;
-	} else {                  /* 100 Mbps? */
-		if (mii_reg & 0x0004)   /* Full Duplex? */
-			*s |= PHY_STAT_100FDX;
-		else
-			*s |= PHY_STAT_100HDX;
-	}
-	if (mii_reg & 0x0008)
-		*s |= PHY_STAT_FAULT;
-}
-
-static phy_info_t phy_info_dp83848= {
-	0x020005c9,
-	"DP83848",
+	struct phy_device *phy_dev = NULL;
+	int phy_addr;
 
-	(const phy_cmd_t []) {  /* config */
-		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
-		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
-		{ mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
-		{ mk_mii_end, }
-	},
-	(const phy_cmd_t []) {  /* startup - enable interrupts */
-		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
-		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
-		{ mk_mii_end, }
-	},
-	(const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
-		{ mk_mii_end, }
-	},
-	(const phy_cmd_t []) {  /* shutdown */
-		{ mk_mii_end, }
-	},
-};
+	/* find the first phy */
+	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
+		if (fep->mii_bus->phy_map[phy_addr]) {
+			phy_dev = fep->mii_bus->phy_map[phy_addr];
+			break;
+		}
+	}
 
-static phy_info_t phy_info_lan8700 = {
-	0x0007C0C,
-	"LAN8700",
-	(const phy_cmd_t []) { /* config */
-		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
-		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
-		{ mk_mii_end, }
-	},
-	(const phy_cmd_t []) { /* startup */
-		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
-		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
-		{ mk_mii_end, }
-	},
-	(const phy_cmd_t []) { /* act_int */
-		{ mk_mii_end, }
-	},
-	(const phy_cmd_t []) { /* shutdown */
-		{ mk_mii_end, }
-	},
-};
-/* ------------------------------------------------------------------------- */
-
-static phy_info_t const * const phy_info[] = {
-	&phy_info_lxt970,
-	&phy_info_lxt971,
-	&phy_info_qs6612,
-	&phy_info_am79c874,
-	&phy_info_ks8721bl,
-	&phy_info_dp83848,
-	&phy_info_lan8700,
-	NULL
-};
+	if (!phy_dev) {
+		printk(KERN_ERR "%s: no PHY found\n", dev->name);
+		return -ENODEV;
+	}
+
+	/* attach the mac to the phy */
+	phy_dev = phy_connect(dev, dev_name(&phy_dev->dev),
+			     &fec_enet_adjust_link, 0,
+			     PHY_INTERFACE_MODE_MII);
+	if (IS_ERR(phy_dev)) {
+		printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
+		return PTR_ERR(phy_dev);
+	}
 
-/* ------------------------------------------------------------------------- */
-#ifdef HAVE_mii_link_interrupt
-static irqreturn_t
-mii_link_interrupt(int irq, void * dev_id);
+	/* mask with MAC supported features */
+	phy_dev->supported &= PHY_BASIC_FEATURES;
+	phy_dev->advertising = phy_dev->supported;
 
-/*
- * This is specific to the MII interrupt setup of the M5272EVB.
- */
-static void __inline__ fec_request_mii_intr(struct net_device *dev)
-{
-	if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
-		printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
-}
+	fep->phy_dev = phy_dev;
+	fep->link = 0;
+	fep->full_duplex = 0;
 
-static void __inline__ fec_disable_phy_intr(struct net_device *dev)
-{
-	free_irq(66, dev);
+	return 0;
 }
-#endif
 
-#ifdef CONFIG_M5272
-static void __inline__ fec_get_mac(struct net_device *dev)
+static int fec_enet_mii_init(struct platform_device *pdev)
 {
+	struct net_device *dev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(dev);
-	unsigned char *iap, tmpaddr[ETH_ALEN];
+	int err = -ENXIO, i;
 
-	if (FEC_FLASHMAC) {
-		/*
-		 * Get MAC address from FLASH.
-		 * If it is all 1's or 0's, use the default.
-		 */
-		iap = (unsigned char *)FEC_FLASHMAC;
-		if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) &&
-		    (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0))
-			iap = fec_mac_default;
-		if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) &&
-		    (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff))
-			iap = fec_mac_default;
-	} else {
-		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
-		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
-		iap = &tmpaddr[0];
-	}
-
-	memcpy(dev->dev_addr, iap, ETH_ALEN);
-
-	/* Adjust MAC if using default MAC address */
-	if (iap == fec_mac_default)
-		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
-}
-#endif
+	fep->mii_timeout = 0;
 
-/* ------------------------------------------------------------------------- */
-
-static void mii_display_status(struct net_device *dev)
-{
-	struct fec_enet_private *fep = netdev_priv(dev);
-	volatile uint *s = &(fep->phy_status);
+	/*
+	 * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+	 */
+	fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000) << 1;
+	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
-	if (!fep->link && !fep->old_link) {
-		/* Link is still down - don't print anything */
-		return;
+	fep->mii_bus = mdiobus_alloc();
+	if (fep->mii_bus == NULL) {
+		err = -ENOMEM;
+		goto err_out;
 	}
 
-	printk("%s: status: ", dev->name);
-
-	if (!fep->link) {
-		printk("link down");
-	} else {
-		printk("link up");
-
-		switch(*s & PHY_STAT_SPMASK) {
-		case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
-		case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
-		case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
-		case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
-		default:
-			printk(", Unknown speed/duplex");
-		}
-
-		if (*s & PHY_STAT_ANC)
-			printk(", auto-negotiation complete");
+	fep->mii_bus->name = "fec_enet_mii_bus";
+	fep->mii_bus->read = fec_enet_mdio_read;
+	fep->mii_bus->write = fec_enet_mdio_write;
+	fep->mii_bus->reset = fec_enet_mdio_reset;
+	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
+	fep->mii_bus->priv = fep;
+	fep->mii_bus->parent = &pdev->dev;
+
+	fep->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+	if (!fep->mii_bus->irq) {
+		err = -ENOMEM;
+		goto err_out_free_mdiobus;
 	}
 
-	if (*s & PHY_STAT_FAULT)
-		printk(", remote fault");
-
-	printk(".\n");
-}
-
-static void mii_display_config(struct work_struct *work)
-{
-	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
-	struct net_device *dev = fep->netdev;
-	uint status = fep->phy_status;
+	for (i = 0; i < PHY_MAX_ADDR; i++)
+		fep->mii_bus->irq[i] = PHY_POLL;
 
-	/*
-	** When we get here, phy_task is already removed from
-	** the workqueue.  It is thus safe to allow to reuse it.
-	*/
-	fep->mii_phy_task_queued = 0;
-	printk("%s: config: auto-negotiation ", dev->name);
-
-	if (status & PHY_CONF_ANE)
-		printk("on");
-	else
-		printk("off");
+	platform_set_drvdata(dev, fep->mii_bus);
 
-	if (status & PHY_CONF_100FDX)
-		printk(", 100FDX");
-	if (status & PHY_CONF_100HDX)
-		printk(", 100HDX");
-	if (status & PHY_CONF_10FDX)
-		printk(", 10FDX");
-	if (status & PHY_CONF_10HDX)
-		printk(", 10HDX");
-	if (!(status & PHY_CONF_SPMASK))
-		printk(", No speed/duplex selected?");
+	if (mdiobus_register(fep->mii_bus))
+		goto err_out_free_mdio_irq;
 
-	if (status & PHY_CONF_LOOP)
-		printk(", loopback enabled");
+	if (fec_enet_mii_probe(dev) != 0)
+		goto err_out_unregister_bus;
 
-	printk(".\n");
+	return 0;
 
-	fep->sequence_done = 1;
+err_out_unregister_bus:
+	mdiobus_unregister(fep->mii_bus);
+err_out_free_mdio_irq:
+	kfree(fep->mii_bus->irq);
+err_out_free_mdiobus:
+	mdiobus_free(fep->mii_bus);
+err_out:
+	return err;
 }
 
-static void mii_relink(struct work_struct *work)
+static void fec_enet_mii_remove(struct fec_enet_private *fep)
 {
-	struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
-	struct net_device *dev = fep->netdev;
-	int duplex;
-
-	/*
-	** When we get here, phy_task is already removed from
-	** the workqueue.  It is thus safe to allow to reuse it.
-	*/
-	fep->mii_phy_task_queued = 0;
-	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
-	mii_display_status(dev);
-	fep->old_link = fep->link;
-
-	if (fep->link) {
-		duplex = 0;
-		if (fep->phy_status
-		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
-			duplex = 1;
-		fec_restart(dev, duplex);
-	} else
-		fec_stop(dev);
+	if (fep->phy_dev)
+		phy_disconnect(fep->phy_dev);
+	mdiobus_unregister(fep->mii_bus);
+	kfree(fep->mii_bus->irq);
+	mdiobus_free(fep->mii_bus);
 }
 
-/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
-static void mii_queue_relink(uint mii_reg, struct net_device *dev)
+static int fec_enet_get_settings(struct net_device *dev,
+				 struct ethtool_cmd *cmd)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct phy_device *phydev = fep->phy_dev;
 
-	/*
-	 * We cannot queue phy_task twice in the workqueue.  It
-	 * would cause an endless loop in the workqueue.
-	 * Fortunately, if the last mii_relink entry has not yet been
-	 * executed now, it will do the job for the current interrupt,
-	 * which is just what we want.
-	 */
-	if (fep->mii_phy_task_queued)
-		return;
+	if (!phydev)
+		return -ENODEV;
 
-	fep->mii_phy_task_queued = 1;
-	INIT_WORK(&fep->phy_task, mii_relink);
-	schedule_work(&fep->phy_task);
+	return phy_ethtool_gset(phydev, cmd);
 }
 
-/* mii_queue_config is called in interrupt context from fec_enet_mii */
-static void mii_queue_config(uint mii_reg, struct net_device *dev)
+static int fec_enet_set_settings(struct net_device *dev,
+				 struct ethtool_cmd *cmd)
 {
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct phy_device *phydev = fep->phy_dev;
 
-	if (fep->mii_phy_task_queued)
-		return;
+	if (!phydev)
+		return -ENODEV;
 
-	fep->mii_phy_task_queued = 1;
-	INIT_WORK(&fep->phy_task, mii_display_config);
-	schedule_work(&fep->phy_task);
+	return phy_ethtool_sset(phydev, cmd);
 }
 
-phy_cmd_t const phy_cmd_relink[] = {
-	{ mk_mii_read(MII_REG_CR), mii_queue_relink },
-	{ mk_mii_end, }
-	};
-phy_cmd_t const phy_cmd_config[] = {
-	{ mk_mii_read(MII_REG_CR), mii_queue_config },
-	{ mk_mii_end, }
-	};
-
-/* Read remainder of PHY ID. */
-static void
-mii_discover_phy3(uint mii_reg, struct net_device *dev)
+static void fec_enet_get_drvinfo(struct net_device *dev,
+				 struct ethtool_drvinfo *info)
 {
-	struct fec_enet_private *fep;
-	int i;
-
-	fep = netdev_priv(dev);
-	fep->phy_id |= (mii_reg & 0xffff);
-	printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);
-
-	for(i = 0; phy_info[i]; i++) {
-		if(phy_info[i]->id == (fep->phy_id >> 4))
-			break;
-	}
-
-	if (phy_info[i])
-		printk(" -- %s\n", phy_info[i]->name);
-	else
-		printk(" -- unknown PHY!\n");
+	struct fec_enet_private *fep = netdev_priv(dev);
 
-	fep->phy = phy_info[i];
-	fep->phy_id_done = 1;
+	strcpy(info->driver, fep->pdev->dev.driver->name);
+	strcpy(info->version, "Revision: 1.0");
+	strcpy(info->bus_info, dev_name(&dev->dev));
 }
 
-/* Scan all of the MII PHY addresses looking for someone to respond
- * with a valid ID.  This usually happens quickly.
- */
-static void
-mii_discover_phy(uint mii_reg, struct net_device *dev)
-{
-	struct fec_enet_private *fep;
-	uint phytype;
-
-	fep = netdev_priv(dev);
-
-	if (fep->phy_addr < 32) {
-		if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {
-
-			/* Got first part of ID, now get remainder */
-			fep->phy_id = phytype << 16;
-			mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2),
-							mii_discover_phy3);
-		} else {
-			fep->phy_addr++;
-			mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1),
-							mii_discover_phy);
-		}
-	} else {
-		printk("FEC: No PHY device found.\n");
-		/* Disable external MII interface */
-		writel(0, fep->hwp + FEC_MII_SPEED);
-		fep->phy_speed = 0;
-#ifdef HAVE_mii_link_interrupt
-		fec_disable_phy_intr(dev);
-#endif
-	}
-}
+static struct ethtool_ops fec_enet_ethtool_ops = {
+	.get_settings		= fec_enet_get_settings,
+	.set_settings		= fec_enet_set_settings,
+	.get_drvinfo		= fec_enet_get_drvinfo,
+	.get_link		= ethtool_op_get_link,
+};
 
-/* This interrupt occurs when the PHY detects a link change */
-#ifdef HAVE_mii_link_interrupt
-static irqreturn_t
-mii_link_interrupt(int irq, void * dev_id)
+static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	struct net_device *dev = dev_id;
 	struct fec_enet_private *fep = netdev_priv(dev);
+	struct phy_device *phydev = fep->phy_dev;
+
+	if (!netif_running(dev))
+		return -EINVAL;
 
-	mii_do_cmd(dev, fep->phy->ack_int);
-	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */
+	if (!phydev)
+		return -ENODEV;
 
-	return IRQ_HANDLED;
+	return phy_mii_ioctl(phydev, if_mii(rq), cmd);
 }
-#endif
 
 static void fec_enet_free_buffers(struct net_device *dev)
 {
@@ -1509,35 +915,8 @@ fec_enet_open(struct net_device *dev)
1509 if (ret) 915 if (ret)
1510 return ret; 916 return ret;
1511 917
1512	fep->sequence_done = 0;
1513	fep->link = 0;
1514
1515	fec_restart(dev, 1);
1516
1517	if (fep->phy) {
1518		mii_do_cmd(dev, fep->phy->ack_int);
1519		mii_do_cmd(dev, fep->phy->config);
1520		mii_do_cmd(dev, phy_cmd_config);	/* display configuration */
1521
1522		/* Poll until the PHY tells us its configuration
1523		 * (not link state).
1524		 * Request is initiated by mii_do_cmd above, but answer
1525		 * comes by interrupt.
1526		 * This should take about 25 usec per register at 2.5 MHz,
1527		 * and we read approximately 5 registers.
1528		 */
1529		while(!fep->sequence_done)
1530			schedule();
1531
1532		mii_do_cmd(dev, fep->phy->startup);
1533	}
1534
1535	/* Set the initial link state to true. A lot of hardware
1536	 * based on this device does not implement a PHY interrupt,
1537	 * so we are never notified of link change.
1538	 */
1539	fep->link = 1;
1540
 918	/* schedule a link state check */
 919	phy_start(fep->phy_dev);
1541	netif_start_queue(dev); 920	netif_start_queue(dev);
1542 fep->opened = 1; 921 fep->opened = 1;
1543 return 0; 922 return 0;
@@ -1550,6 +929,7 @@ fec_enet_close(struct net_device *dev)
1550 929
1551 /* Don't know what to do yet. */ 930 /* Don't know what to do yet. */
1552 fep->opened = 0; 931 fep->opened = 0;
932 phy_stop(fep->phy_dev);
1553 netif_stop_queue(dev); 933 netif_stop_queue(dev);
1554 fec_stop(dev); 934 fec_stop(dev);
1555 935
@@ -1574,7 +954,7 @@ fec_enet_close(struct net_device *dev)
1574static void set_multicast_list(struct net_device *dev) 954static void set_multicast_list(struct net_device *dev)
1575{ 955{
1576 struct fec_enet_private *fep = netdev_priv(dev); 956 struct fec_enet_private *fep = netdev_priv(dev);
1577 struct dev_mc_list *dmi; 957 struct netdev_hw_addr *ha;
1578 unsigned int i, bit, data, crc, tmp; 958 unsigned int i, bit, data, crc, tmp;
1579 unsigned char hash; 959 unsigned char hash;
1580 960
@@ -1604,16 +984,16 @@ static void set_multicast_list(struct net_device *dev)
1604 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH); 984 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
1605 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW); 985 writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
1606 986
1607 netdev_for_each_mc_addr(dmi, dev) { 987 netdev_for_each_mc_addr(ha, dev) {
1608 /* Only support group multicast for now */ 988 /* Only support group multicast for now */
1609 if (!(dmi->dmi_addr[0] & 1)) 989 if (!(ha->addr[0] & 1))
1610 continue; 990 continue;
1611 991
1612 /* calculate crc32 value of mac address */ 992 /* calculate crc32 value of mac address */
1613 crc = 0xffffffff; 993 crc = 0xffffffff;
1614 994
1615 for (i = 0; i < dmi->dmi_addrlen; i++) { 995 for (i = 0; i < dev->addr_len; i++) {
1616 data = dmi->dmi_addr[i]; 996 data = ha->addr[i];
1617 for (bit = 0; bit < 8; bit++, data >>= 1) { 997 for (bit = 0; bit < 8; bit++, data >>= 1) {
1618 crc = (crc >> 1) ^ 998 crc = (crc >> 1) ^
1619 (((crc ^ data) & 1) ? CRC32_POLY : 0); 999 (((crc ^ data) & 1) ? CRC32_POLY : 0);
@@ -1666,6 +1046,7 @@ static const struct net_device_ops fec_netdev_ops = {
1666 .ndo_validate_addr = eth_validate_addr, 1046 .ndo_validate_addr = eth_validate_addr,
1667 .ndo_tx_timeout = fec_timeout, 1047 .ndo_tx_timeout = fec_timeout,
1668 .ndo_set_mac_address = fec_set_mac_address, 1048 .ndo_set_mac_address = fec_set_mac_address,
1049 .ndo_do_ioctl = fec_enet_ioctl,
1669}; 1050};
1670 1051
1671 /* 1052 /*
@@ -1689,7 +1070,6 @@ static int fec_enet_init(struct net_device *dev, int index)
1689 } 1070 }
1690 1071
1691 spin_lock_init(&fep->hw_lock); 1072 spin_lock_init(&fep->hw_lock);
1692 spin_lock_init(&fep->mii_lock);
1693 1073
1694 fep->index = index; 1074 fep->index = index;
1695 fep->hwp = (void __iomem *)dev->base_addr; 1075 fep->hwp = (void __iomem *)dev->base_addr;
@@ -1716,20 +1096,10 @@ static int fec_enet_init(struct net_device *dev, int index)
1716 fep->rx_bd_base = cbd_base; 1096 fep->rx_bd_base = cbd_base;
1717 fep->tx_bd_base = cbd_base + RX_RING_SIZE; 1097 fep->tx_bd_base = cbd_base + RX_RING_SIZE;
1718 1098
1719#ifdef HAVE_mii_link_interrupt
1720 fec_request_mii_intr(dev);
1721#endif
1722 /* The FEC Ethernet specific entries in the device structure */ 1099 /* The FEC Ethernet specific entries in the device structure */
1723 dev->watchdog_timeo = TX_TIMEOUT; 1100 dev->watchdog_timeo = TX_TIMEOUT;
1724 dev->netdev_ops = &fec_netdev_ops; 1101 dev->netdev_ops = &fec_netdev_ops;
1725 1102 dev->ethtool_ops = &fec_enet_ethtool_ops;
1726 for (i=0; i<NMII-1; i++)
1727 mii_cmds[i].mii_next = &mii_cmds[i+1];
1728 mii_free = mii_cmds;
1729
1730 /* Set MII speed to 2.5 MHz */
1731 fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
1732 / 2500000) / 2) & 0x3F) << 1;
1733 1103
1734 /* Initialize the receive buffer descriptors. */ 1104 /* Initialize the receive buffer descriptors. */
1735 bdp = fep->rx_bd_base; 1105 bdp = fep->rx_bd_base;
@@ -1760,13 +1130,6 @@ static int fec_enet_init(struct net_device *dev, int index)
1760 1130
1761 fec_restart(dev, 0); 1131 fec_restart(dev, 0);
1762 1132
1763 /* Queue up command to detect the PHY and initialize the
1764 * remainder of the interface.
1765 */
1766 fep->phy_id_done = 0;
1767 fep->phy_addr = 0;
1768 mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
1769
1770 return 0; 1133 return 0;
1771} 1134}
1772 1135
@@ -1835,8 +1198,7 @@ fec_restart(struct net_device *dev, int duplex)
1835 writel(0, fep->hwp + FEC_R_DES_ACTIVE); 1198 writel(0, fep->hwp + FEC_R_DES_ACTIVE);
1836 1199
1837 /* Enable interrupts we wish to service */ 1200 /* Enable interrupts we wish to service */
1838 writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII, 1201 writel(FEC_ENET_TXF | FEC_ENET_RXF, fep->hwp + FEC_IMASK);
1839 fep->hwp + FEC_IMASK);
1840} 1202}
1841 1203
1842static void 1204static void
@@ -1859,7 +1221,6 @@ fec_stop(struct net_device *dev)
1859 /* Clear outstanding MII command interrupts. */ 1221 /* Clear outstanding MII command interrupts. */
1860 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT); 1222 writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
1861 1223
1862 writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
1863 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1224 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1864} 1225}
1865 1226
@@ -1891,6 +1252,7 @@ fec_probe(struct platform_device *pdev)
1891 memset(fep, 0, sizeof(*fep)); 1252 memset(fep, 0, sizeof(*fep));
1892 1253
1893 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r)); 1254 ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
1255 fep->pdev = pdev;
1894 1256
1895 if (!ndev->base_addr) { 1257 if (!ndev->base_addr) {
1896 ret = -ENOMEM; 1258 ret = -ENOMEM;
@@ -1926,13 +1288,24 @@ fec_probe(struct platform_device *pdev)
1926 if (ret) 1288 if (ret)
1927 goto failed_init; 1289 goto failed_init;
1928 1290
1291 ret = fec_enet_mii_init(pdev);
1292 if (ret)
1293 goto failed_mii_init;
1294
1929 ret = register_netdev(ndev); 1295 ret = register_netdev(ndev);
1930 if (ret) 1296 if (ret)
1931 goto failed_register; 1297 goto failed_register;
1932 1298
1299 printk(KERN_INFO "%s: Freescale FEC PHY driver [%s] "
1300 "(mii_bus:phy_addr=%s, irq=%d)\n", ndev->name,
1301 fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev),
1302 fep->phy_dev->irq);
1303
1933 return 0; 1304 return 0;
1934 1305
1935failed_register: 1306failed_register:
1307 fec_enet_mii_remove(fep);
1308failed_mii_init:
1936failed_init: 1309failed_init:
1937 clk_disable(fep->clk); 1310 clk_disable(fep->clk);
1938 clk_put(fep->clk); 1311 clk_put(fep->clk);
@@ -1959,6 +1332,7 @@ fec_drv_remove(struct platform_device *pdev)
1959 platform_set_drvdata(pdev, NULL); 1332 platform_set_drvdata(pdev, NULL);
1960 1333
1961 fec_stop(ndev); 1334 fec_stop(ndev);
1335 fec_enet_mii_remove(fep);
1962 clk_disable(fep->clk); 1336 clk_disable(fep->clk);
1963 clk_put(fep->clk); 1337 clk_put(fep->clk);
1964 iounmap((void __iomem *)ndev->base_addr); 1338 iounmap((void __iomem *)ndev->base_addr);
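
The fec.c conversion above retires the driver's hand-rolled MII state machine in favour of phylib. The resulting pattern is generic: once a struct phy_device is attached, the ethtool settings paths and the legacy MII ioctls reduce to a NULL guard plus a phylib helper call. A minimal sketch of that delegation, assuming a hypothetical foo_ driver whose private data carries the attached PHY (the guards mirror fec_enet_ioctl() above):

#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/netdevice.h>
#include <linux/phy.h>

struct foo_priv {
	struct phy_device *phy_dev;	/* set by phy_connect() at open time */
};

static int foo_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (!priv->phy_dev)
		return -ENODEV;
	/* phylib reports speed/duplex/autoneg state on the driver's behalf */
	return phy_ethtool_gset(priv->phy_dev, cmd);
}

static int foo_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct foo_priv *priv = netdev_priv(dev);

	if (!netif_running(dev))
		return -EINVAL;
	if (!priv->phy_dev)
		return -ENODEV;
	/* SIOCGMIIPHY, SIOCGMIIREG and SIOCSMIIREG all land here */
	return phy_mii_ioctl(priv->phy_dev, if_mii(rq), cmd);
}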
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 4a43e56b7394..0376c3e472a7 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -436,7 +436,6 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
436 DMA_FROM_DEVICE); 436 DMA_FROM_DEVICE);
437 length = status & BCOM_FEC_RX_BD_LEN_MASK; 437 length = status & BCOM_FEC_RX_BD_LEN_MASK;
438 skb_put(rskb, length - 4); /* length without CRC32 */ 438 skb_put(rskb, length - 4); /* length without CRC32 */
439 rskb->dev = dev;
440 rskb->protocol = eth_type_trans(rskb, dev); 439 rskb->protocol = eth_type_trans(rskb, dev);
441 netif_rx(rskb); 440 netif_rx(rskb);
442 441
@@ -576,12 +575,12 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
576 out_be32(&fec->gaddr2, 0xffffffff); 575 out_be32(&fec->gaddr2, 0xffffffff);
577 } else { 576 } else {
578 u32 crc; 577 u32 crc;
579 struct dev_mc_list *dmi; 578 struct netdev_hw_addr *ha;
580 u32 gaddr1 = 0x00000000; 579 u32 gaddr1 = 0x00000000;
581 u32 gaddr2 = 0x00000000; 580 u32 gaddr2 = 0x00000000;
582 581
583 netdev_for_each_mc_addr(dmi, dev) { 582 netdev_for_each_mc_addr(ha, dev) {
584 crc = ether_crc_le(6, dmi->dmi_addr) >> 26; 583 crc = ether_crc_le(6, ha->addr) >> 26;
585 if (crc >= 32) 584 if (crc >= 32)
586 gaddr1 |= 1 << (crc-32); 585 gaddr1 |= 1 << (crc-32);
587 else 586 else
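
Both FEC drivers now reduce a multicast address to a 6-bit hash bin: fec.c keeps its open-coded little-endian CRC-32 bit loop, while fec_mpc52xx.c calls ether_crc_le() and takes the top six bits of the result. The two forms should be equivalent; the stand-alone sketch below (hypothetical file mchash.c, compile with "cc -o mchash mchash.c") reproduces the bit-serial CRC and the gaddr1/gaddr2 split so a bin assignment can be inspected from the command line:

#include <stdio.h>
#include <stdint.h>

/* bit-serial little-endian CRC-32, polynomial 0xedb88320 */
static uint32_t ether_crc_le(int len, const unsigned char *data)
{
	uint32_t crc = 0xffffffff;
	int bit;

	while (len-- > 0) {
		unsigned char byte = *data++;
		for (bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc >> 1) ^
			      (((crc ^ byte) & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t bin = ether_crc_le(6, mac) >> 26;	/* top 6 bits, 0..63 */
	uint32_t gaddr1 = 0, gaddr2 = 0;

	if (bin >= 32)		/* same split as mpc52xx_fec above */
		gaddr1 |= 1u << (bin - 32);
	else
		gaddr2 |= 1u << bin;

	printf("bin %u -> gaddr1=%08x gaddr2=%08x\n",
	       (unsigned)bin, (unsigned)gaddr1, (unsigned)gaddr2);
	return 0;
}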
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 5c98f7c22425..a1c0e7bb70e8 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -2148,7 +2148,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2148 unsigned int i; 2148 unsigned int i;
2149 u32 offset = 0; 2149 u32 offset = 0;
2150 u32 bcnt; 2150 u32 bcnt;
2151 u32 size = skb->len-skb->data_len; 2151 u32 size = skb_headlen(skb);
2152 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2152 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2153 u32 empty_slots; 2153 u32 empty_slots;
2154 struct ring_desc* put_tx; 2154 struct ring_desc* put_tx;
@@ -2269,7 +2269,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2269 unsigned int i; 2269 unsigned int i;
2270 u32 offset = 0; 2270 u32 offset = 0;
2271 u32 bcnt; 2271 u32 bcnt;
2272 u32 size = skb->len-skb->data_len; 2272 u32 size = skb_headlen(skb);
2273 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2273 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2274 u32 empty_slots; 2274 u32 empty_slots;
2275 struct ring_desc_ex* put_tx; 2275 struct ring_desc_ex* put_tx;
@@ -3104,12 +3104,14 @@ static void nv_set_multicast(struct net_device *dev)
3104 if (dev->flags & IFF_ALLMULTI) { 3104 if (dev->flags & IFF_ALLMULTI) {
3105 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0; 3105 alwaysOn[0] = alwaysOn[1] = alwaysOff[0] = alwaysOff[1] = 0;
3106 } else { 3106 } else {
3107 struct dev_mc_list *walk; 3107 struct netdev_hw_addr *ha;
3108 3108
3109 netdev_for_each_mc_addr(walk, dev) { 3109 netdev_for_each_mc_addr(ha, dev) {
3110 unsigned char *addr = ha->addr;
3110 u32 a, b; 3111 u32 a, b;
3111 a = le32_to_cpu(*(__le32 *) walk->dmi_addr); 3112
3112 b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4])); 3113 a = le32_to_cpu(*(__le32 *) addr);
3114 b = le16_to_cpu(*(__le16 *) (&addr[4]));
3113 alwaysOn[0] &= a; 3115 alwaysOn[0] &= a;
3114 alwaysOff[0] &= ~a; 3116 alwaysOff[0] &= ~a;
3115 alwaysOn[1] &= b; 3117 alwaysOn[1] &= b;
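
The forcedeth hunks replace the open-coded "skb->len - skb->data_len" with skb_headlen(). The substitution is behaviour-preserving: the helper in include/linux/skbuff.h is defined as exactly that difference, where skb->data_len counts only the bytes held in paged fragments, so what remains is the linear head of the buffer:

static inline unsigned int skb_headlen(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;
}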
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 0a973e71876b..714da967fa19 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -231,12 +231,12 @@ static void set_multicast_finish(struct net_device *dev)
231 231
232static void set_multicast_list(struct net_device *dev) 232static void set_multicast_list(struct net_device *dev)
233{ 233{
234 struct dev_mc_list *pmc; 234 struct netdev_hw_addr *ha;
235 235
236 if ((dev->flags & IFF_PROMISC) == 0) { 236 if ((dev->flags & IFF_PROMISC) == 0) {
237 set_multicast_start(dev); 237 set_multicast_start(dev);
238 netdev_for_each_mc_addr(pmc, dev) 238 netdev_for_each_mc_addr(ha, dev)
239 set_multicast_one(dev, pmc->dmi_addr); 239 set_multicast_one(dev, ha->addr);
240 set_multicast_finish(dev); 240 set_multicast_finish(dev);
241 } else 241 } else
242 set_promiscuous_mode(dev); 242 set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index ec81f50d5919..7eff92ef01da 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -232,12 +232,12 @@ static void set_multicast_finish(struct net_device *dev)
232 232
233static void set_multicast_list(struct net_device *dev) 233static void set_multicast_list(struct net_device *dev)
234{ 234{
235 struct dev_mc_list *pmc; 235 struct netdev_hw_addr *ha;
236 236
237 if ((dev->flags & IFF_PROMISC) == 0) { 237 if ((dev->flags & IFF_PROMISC) == 0) {
238 set_multicast_start(dev); 238 set_multicast_start(dev);
239 netdev_for_each_mc_addr(pmc, dev) 239 netdev_for_each_mc_addr(ha, dev)
240 set_multicast_one(dev, pmc->dmi_addr); 240 set_multicast_one(dev, ha->addr);
241 set_multicast_finish(dev); 241 set_multicast_finish(dev);
242 } else 242 } else
243 set_promiscuous_mode(dev); 243 set_promiscuous_mode(dev);
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 34d3da751eb4..7f0591e43cd9 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -223,12 +223,12 @@ static void set_multicast_finish(struct net_device *dev)
223 223
224static void set_multicast_list(struct net_device *dev) 224static void set_multicast_list(struct net_device *dev)
225{ 225{
226 struct dev_mc_list *pmc; 226 struct netdev_hw_addr *ha;
227 227
228 if ((dev->flags & IFF_PROMISC) == 0) { 228 if ((dev->flags & IFF_PROMISC) == 0) {
229 set_multicast_start(dev); 229 set_multicast_start(dev);
230 netdev_for_each_mc_addr(pmc, dev) 230 netdev_for_each_mc_addr(ha, dev)
231 set_multicast_one(dev, pmc->dmi_addr); 231 set_multicast_one(dev, ha->addr);
232 set_multicast_finish(dev); 232 set_multicast_finish(dev);
233 } else 233 } else
234 set_promiscuous_mode(dev); 234 set_promiscuous_mode(dev);
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 080d1cea5b26..032073d1e3d2 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -82,6 +82,7 @@
82#include <linux/tcp.h> 82#include <linux/tcp.h>
83#include <linux/udp.h> 83#include <linux/udp.h>
84#include <linux/in.h> 84#include <linux/in.h>
85#include <linux/net_tstamp.h>
85 86
86#include <asm/io.h> 87#include <asm/io.h>
87#include <asm/irq.h> 88#include <asm/irq.h>
@@ -377,6 +378,13 @@ static void gfar_init_mac(struct net_device *ndev)
377 rctrl |= RCTRL_PADDING(priv->padding); 378 rctrl |= RCTRL_PADDING(priv->padding);
378 } 379 }
379 380
381 /* Insert receive time stamps into padding alignment bytes */
382 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
383 rctrl &= ~RCTRL_PAL_MASK;
384 rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE | RCTRL_PADDING(8);
385 priv->padding = 8;
386 }
387
380 /* keep vlan related bits if it's enabled */ 388 /* keep vlan related bits if it's enabled */
381 if (priv->vlgrp) { 389 if (priv->vlgrp) {
382 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 390 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
@@ -501,7 +509,8 @@ void unlock_tx_qs(struct gfar_private *priv)
501/* Returns 1 if incoming frames use an FCB */ 509/* Returns 1 if incoming frames use an FCB */
502static inline int gfar_uses_fcb(struct gfar_private *priv) 510static inline int gfar_uses_fcb(struct gfar_private *priv)
503{ 511{
504 return priv->vlgrp || priv->rx_csum_enable; 512 return priv->vlgrp || priv->rx_csum_enable ||
513 (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER);
505} 514}
506 515
507static void free_tx_pointers(struct gfar_private *priv) 516static void free_tx_pointers(struct gfar_private *priv)
@@ -742,7 +751,8 @@ static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
742 FSL_GIANFAR_DEV_HAS_CSUM | 751 FSL_GIANFAR_DEV_HAS_CSUM |
743 FSL_GIANFAR_DEV_HAS_VLAN | 752 FSL_GIANFAR_DEV_HAS_VLAN |
744 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET | 753 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
745 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH; 754 FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
755 FSL_GIANFAR_DEV_HAS_TIMER;
746 756
747 ctype = of_get_property(np, "phy-connection-type", NULL); 757 ctype = of_get_property(np, "phy-connection-type", NULL);
748 758
@@ -772,6 +782,48 @@ err_grp_init:
772 return err; 782 return err;
773} 783}
774 784
785static int gfar_hwtstamp_ioctl(struct net_device *netdev,
786 struct ifreq *ifr, int cmd)
787{
788 struct hwtstamp_config config;
789 struct gfar_private *priv = netdev_priv(netdev);
790
791 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
792 return -EFAULT;
793
794 /* reserved for future extensions */
795 if (config.flags)
796 return -EINVAL;
797
798 switch (config.tx_type) {
799 case HWTSTAMP_TX_OFF:
800 priv->hwts_tx_en = 0;
801 break;
802 case HWTSTAMP_TX_ON:
803 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
804 return -ERANGE;
805 priv->hwts_tx_en = 1;
806 break;
807 default:
808 return -ERANGE;
809 }
810
811 switch (config.rx_filter) {
812 case HWTSTAMP_FILTER_NONE:
813 priv->hwts_rx_en = 0;
814 break;
815 default:
816 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
817 return -ERANGE;
818 priv->hwts_rx_en = 1;
819 config.rx_filter = HWTSTAMP_FILTER_ALL;
820 break;
821 }
822
823 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
824 -EFAULT : 0;
825}
826
775/* Ioctl MII Interface */ 827/* Ioctl MII Interface */
776static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 828static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
777{ 829{
@@ -780,6 +832,9 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
780 if (!netif_running(dev)) 832 if (!netif_running(dev))
781 return -EINVAL; 833 return -EINVAL;
782 834
835 if (cmd == SIOCSHWTSTAMP)
836 return gfar_hwtstamp_ioctl(dev, rq, cmd);
837
783 if (!priv->phydev) 838 if (!priv->phydev)
784 return -ENODEV; 839 return -ENODEV;
785 840
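
gfar_hwtstamp_ioctl() above is the driver half of the SIOCSHWTSTAMP contract: validate the requested tx_type and rx_filter, possibly upgrade rx_filter to what the hardware actually enables, and copy the (possibly rewritten) config back to the caller. The user-space half is a plain ifreq ioctl on any socket; a minimal sketch, with the interface name "eth0" assumed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;		/* stamp transmitted frames */
	cfg.rx_filter = HWTSTAMP_FILTER_ALL;	/* stamp everything received */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else	/* gianfar rewrites rx_filter to HWTSTAMP_FILTER_ALL */
		printf("rx_filter granted: %d\n", cfg.rx_filter);
	close(fd);
	return 0;
}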
@@ -982,7 +1037,8 @@ static int gfar_probe(struct of_device *ofdev,
982 else 1037 else
983 priv->padding = 0; 1038 priv->padding = 0;
984 1039
985 if (dev->features & NETIF_F_IP_CSUM) 1040 if (dev->features & NETIF_F_IP_CSUM ||
1041 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
986 dev->hard_header_len += GMAC_FCB_LEN; 1042 dev->hard_header_len += GMAC_FCB_LEN;
987 1043
988 /* Program the isrg regs only if number of grps > 1 */ 1044 /* Program the isrg regs only if number of grps > 1 */
@@ -1926,23 +1982,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1926 struct netdev_queue *txq; 1982 struct netdev_queue *txq;
1927 struct gfar __iomem *regs = NULL; 1983 struct gfar __iomem *regs = NULL;
1928 struct txfcb *fcb = NULL; 1984 struct txfcb *fcb = NULL;
1929 struct txbd8 *txbdp, *txbdp_start, *base; 1985 struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
1930 u32 lstatus; 1986 u32 lstatus;
1931 int i, rq = 0; 1987 int i, rq = 0, do_tstamp = 0;
1932 u32 bufaddr; 1988 u32 bufaddr;
1933 unsigned long flags; 1989 unsigned long flags;
1934 unsigned int nr_frags, length; 1990 unsigned int nr_frags, nr_txbds, length;
1935 1991 union skb_shared_tx *shtx;
1936 1992
1937 rq = skb->queue_mapping; 1993 rq = skb->queue_mapping;
1938 tx_queue = priv->tx_queue[rq]; 1994 tx_queue = priv->tx_queue[rq];
1939 txq = netdev_get_tx_queue(dev, rq); 1995 txq = netdev_get_tx_queue(dev, rq);
1940 base = tx_queue->tx_bd_base; 1996 base = tx_queue->tx_bd_base;
1941 regs = tx_queue->grp->regs; 1997 regs = tx_queue->grp->regs;
1998 shtx = skb_tx(skb);
1999
2000 /* check if time stamp should be generated */
2001 if (unlikely(shtx->hardware && priv->hwts_tx_en))
2002 do_tstamp = 1;
1942 2003
1943 /* make space for additional header when fcb is needed */ 2004 /* make space for additional header when fcb is needed */
1944 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 2005 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
1945 (priv->vlgrp && vlan_tx_tag_present(skb))) && 2006 (priv->vlgrp && vlan_tx_tag_present(skb)) ||
2007 unlikely(do_tstamp)) &&
1946 (skb_headroom(skb) < GMAC_FCB_LEN)) { 2008 (skb_headroom(skb) < GMAC_FCB_LEN)) {
1947 struct sk_buff *skb_new; 2009 struct sk_buff *skb_new;
1948 2010
@@ -1959,8 +2021,14 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1959 /* total number of fragments in the SKB */ 2021 /* total number of fragments in the SKB */
1960 nr_frags = skb_shinfo(skb)->nr_frags; 2022 nr_frags = skb_shinfo(skb)->nr_frags;
1961 2023
2024 /* calculate the required number of TxBDs for this skb */
2025 if (unlikely(do_tstamp))
2026 nr_txbds = nr_frags + 2;
2027 else
2028 nr_txbds = nr_frags + 1;
2029
1962 /* check if there is space to queue this packet */ 2030 /* check if there is space to queue this packet */
1963 if ((nr_frags+1) > tx_queue->num_txbdfree) { 2031 if (nr_txbds > tx_queue->num_txbdfree) {
1964 /* no space, stop the queue */ 2032 /* no space, stop the queue */
1965 netif_tx_stop_queue(txq); 2033 netif_tx_stop_queue(txq);
1966 dev->stats.tx_fifo_errors++; 2034 dev->stats.tx_fifo_errors++;
@@ -1972,9 +2040,19 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1972 txq->tx_packets ++; 2040 txq->tx_packets ++;
1973 2041
1974 txbdp = txbdp_start = tx_queue->cur_tx; 2042 txbdp = txbdp_start = tx_queue->cur_tx;
2043 lstatus = txbdp->lstatus;
2044
2045 /* Time stamp insertion requires one additional TxBD */
2046 if (unlikely(do_tstamp))
2047 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
2048 tx_queue->tx_ring_size);
1975 2049
1976 if (nr_frags == 0) { 2050 if (nr_frags == 0) {
1977 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 2051 if (unlikely(do_tstamp))
2052 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_LAST |
2053 TXBD_INTERRUPT);
2054 else
2055 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1978 } else { 2056 } else {
1979 /* Place the fragment addresses and lengths into the TxBDs */ 2057 /* Place the fragment addresses and lengths into the TxBDs */
1980 for (i = 0; i < nr_frags; i++) { 2058 for (i = 0; i < nr_frags; i++) {
@@ -2020,11 +2098,32 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2020 gfar_tx_vlan(skb, fcb); 2098 gfar_tx_vlan(skb, fcb);
2021 } 2099 }
2022 2100
2023 /* setup the TxBD length and buffer pointer for the first BD */ 2101 /* Setup tx hardware time stamping if requested */
2102 if (unlikely(do_tstamp)) {
2103 shtx->in_progress = 1;
2104 if (fcb == NULL)
2105 fcb = gfar_add_fcb(skb);
2106 fcb->ptp = 1;
2107 lstatus |= BD_LFLAG(TXBD_TOE);
2108 }
2109
2024 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 2110 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
2025 skb_headlen(skb), DMA_TO_DEVICE); 2111 skb_headlen(skb), DMA_TO_DEVICE);
2026 2112
2027 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 2113 /*
2114 * If time stamping is requested one additional TxBD must be set up. The
2115 * first TxBD points to the FCB and must have a data length of
2116 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
2117 * the full frame length.
2118 */
2119 if (unlikely(do_tstamp)) {
2120 txbdp_tstamp->bufPtr = txbdp_start->bufPtr + GMAC_FCB_LEN;
2121 txbdp_tstamp->lstatus |= BD_LFLAG(TXBD_READY) |
2122 (skb_headlen(skb) - GMAC_FCB_LEN);
2123 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
2124 } else {
2125 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
2126 }
2028 2127
2029 /* 2128 /*
2030 * We can work in parallel with gfar_clean_tx_ring(), except 2129 * We can work in parallel with gfar_clean_tx_ring(), except
@@ -2064,7 +2163,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
2064 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size); 2163 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
2065 2164
2066 /* reduce TxBD free count */ 2165 /* reduce TxBD free count */
2067 tx_queue->num_txbdfree -= (nr_frags + 1); 2166 tx_queue->num_txbdfree -= (nr_txbds);
2068 2167
2069 dev->trans_start = jiffies; 2168 dev->trans_start = jiffies;
2070 2169
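
The FCB/data split described in the comment above is worth spelling out. GMAC_FCB_LEN is 8 on gianfar, so for a time-stamped frame the head of the skb is carved into two descriptors (annotation only, not driver code):

/*
 *   txbdp_start:   bufPtr -> FCB (fcb->ptp = 1)    len = GMAC_FCB_LEN
 *   txbdp_tstamp:  bufPtr -> FCB + GMAC_FCB_LEN    len = skb_headlen()
 *                                                        - GMAC_FCB_LEN
 *   ...then one TxBD per page fragment, as in the untimestamped case...
 *
 * This is why nr_txbds is nr_frags + 2 rather than nr_frags + 1, and
 * why gfar_clean_tx_ring() must mirror the same accounting when it
 * frees descriptors.
 */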
@@ -2255,16 +2354,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2255 struct net_device *dev = tx_queue->dev; 2354 struct net_device *dev = tx_queue->dev;
2256 struct gfar_private *priv = netdev_priv(dev); 2355 struct gfar_private *priv = netdev_priv(dev);
2257 struct gfar_priv_rx_q *rx_queue = NULL; 2356 struct gfar_priv_rx_q *rx_queue = NULL;
2258 struct txbd8 *bdp; 2357 struct txbd8 *bdp, *next = NULL;
2259 struct txbd8 *lbdp = NULL; 2358 struct txbd8 *lbdp = NULL;
2260 struct txbd8 *base = tx_queue->tx_bd_base; 2359 struct txbd8 *base = tx_queue->tx_bd_base;
2261 struct sk_buff *skb; 2360 struct sk_buff *skb;
2262 int skb_dirtytx; 2361 int skb_dirtytx;
2263 int tx_ring_size = tx_queue->tx_ring_size; 2362 int tx_ring_size = tx_queue->tx_ring_size;
2264 int frags = 0; 2363 int frags = 0, nr_txbds = 0;
2265 int i; 2364 int i;
2266 int howmany = 0; 2365 int howmany = 0;
2267 u32 lstatus; 2366 u32 lstatus;
2367 size_t buflen;
2368 union skb_shared_tx *shtx;
2268 2369
2269 rx_queue = priv->rx_queue[tx_queue->qindex]; 2370 rx_queue = priv->rx_queue[tx_queue->qindex];
2270 bdp = tx_queue->dirty_tx; 2371 bdp = tx_queue->dirty_tx;
@@ -2274,7 +2375,18 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2274 unsigned long flags; 2375 unsigned long flags;
2275 2376
2276 frags = skb_shinfo(skb)->nr_frags; 2377 frags = skb_shinfo(skb)->nr_frags;
2277 lbdp = skip_txbd(bdp, frags, base, tx_ring_size); 2378
2379 /*
2380 * When time stamping, one additional TxBD must be freed.
2381 * Also, we need to dma_unmap_single() the TxPAL.
2382 */
2383 shtx = skb_tx(skb);
2384 if (unlikely(shtx->in_progress))
2385 nr_txbds = frags + 2;
2386 else
2387 nr_txbds = frags + 1;
2388
2389 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2278 2390
2279 lstatus = lbdp->lstatus; 2391 lstatus = lbdp->lstatus;
2280 2392
@@ -2283,10 +2395,24 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2283 (lstatus & BD_LENGTH_MASK)) 2395 (lstatus & BD_LENGTH_MASK))
2284 break; 2396 break;
2285 2397
2286 dma_unmap_single(&priv->ofdev->dev, 2398 if (unlikely(shtx->in_progress)) {
2287 bdp->bufPtr, 2399 next = next_txbd(bdp, base, tx_ring_size);
2288 bdp->length, 2400 buflen = next->length + GMAC_FCB_LEN;
2289 DMA_TO_DEVICE); 2401 } else
2402 buflen = bdp->length;
2403
2404 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
2405 buflen, DMA_TO_DEVICE);
2406
2407 if (unlikely(shtx->in_progress)) {
2408 struct skb_shared_hwtstamps shhwtstamps;
2409 u64 *ns = (u64*) (((u32)skb->data + 0x10) & ~0x7);
2410 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2411 shhwtstamps.hwtstamp = ns_to_ktime(*ns);
2412 skb_tstamp_tx(skb, &shhwtstamps);
2413 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2414 bdp = next;
2415 }
2290 2416
2291 bdp->lstatus &= BD_LFLAG(TXBD_WRAP); 2417 bdp->lstatus &= BD_LFLAG(TXBD_WRAP);
2292 bdp = next_txbd(bdp, base, tx_ring_size); 2418 bdp = next_txbd(bdp, base, tx_ring_size);
@@ -2318,7 +2444,7 @@ static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2318 2444
2319 howmany++; 2445 howmany++;
2320 spin_lock_irqsave(&tx_queue->txlock, flags); 2446 spin_lock_irqsave(&tx_queue->txlock, flags);
2321 tx_queue->num_txbdfree += frags + 1; 2447 tx_queue->num_txbdfree += nr_txbds;
2322 spin_unlock_irqrestore(&tx_queue->txlock, flags); 2448 spin_unlock_irqrestore(&tx_queue->txlock, flags);
2323 } 2449 }
2324 2450
@@ -2474,6 +2600,17 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
2474 skb_pull(skb, amount_pull); 2600 skb_pull(skb, amount_pull);
2475 } 2601 }
2476 2602
2603 /* Get receive timestamp from the skb */
2604 if (priv->hwts_rx_en) {
2605 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2606 u64 *ns = (u64 *) skb->data;
2607 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2608 shhwtstamps->hwtstamp = ns_to_ktime(*ns);
2609 }
2610
2611 if (priv->padding)
2612 skb_pull(skb, priv->padding);
2613
2477 if (priv->rx_csum_enable) 2614 if (priv->rx_csum_enable)
2478 gfar_rx_checksum(skb, fcb); 2615 gfar_rx_checksum(skb, fcb);
2479 2616
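
The reordered pulls above encode the receive-side buffer layout. With FSL_GIANFAR_DEV_HAS_TIMER set, gfar_init_mac() forces 8 bytes of rx padding and the controller deposits the timestamp there, so a frame arrives as (annotation only):

/*
 *   [ FCB ][ 8-byte ns timestamp == padding ][ ethernet frame ]
 *
 * amount_pull now strips only the FCB, leaving skb->data pointing at
 * the raw nanosecond counter; after that is converted with
 * ns_to_ktime(), the second skb_pull(skb, priv->padding) advances to
 * the frame proper.
 */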
@@ -2510,8 +2647,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
2510 bdp = rx_queue->cur_rx; 2647 bdp = rx_queue->cur_rx;
2511 base = rx_queue->rx_bd_base; 2648 base = rx_queue->rx_bd_base;
2512 2649
2513 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + 2650 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0);
2514 priv->padding;
2515 2651
2516 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) { 2652 while (!((bdp->status & RXBD_EMPTY) || (--rx_work_limit < 0))) {
2517 struct sk_buff *newskb; 2653 struct sk_buff *newskb;
@@ -2798,7 +2934,7 @@ static void adjust_link(struct net_device *dev)
2798 * whenever dev->flags is changed */ 2934 * whenever dev->flags is changed */
2799static void gfar_set_multi(struct net_device *dev) 2935static void gfar_set_multi(struct net_device *dev)
2800{ 2936{
2801 struct dev_mc_list *mc_ptr; 2937 struct netdev_hw_addr *ha;
2802 struct gfar_private *priv = netdev_priv(dev); 2938 struct gfar_private *priv = netdev_priv(dev);
2803 struct gfar __iomem *regs = priv->gfargrp[0].regs; 2939 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2804 u32 tempval; 2940 u32 tempval;
@@ -2871,13 +3007,12 @@ static void gfar_set_multi(struct net_device *dev)
2871 return; 3007 return;
2872 3008
2873 /* Parse the list, and set the appropriate bits */ 3009 /* Parse the list, and set the appropriate bits */
2874 netdev_for_each_mc_addr(mc_ptr, dev) { 3010 netdev_for_each_mc_addr(ha, dev) {
2875 if (idx < em_num) { 3011 if (idx < em_num) {
2876 gfar_set_mac_for_addr(dev, idx, 3012 gfar_set_mac_for_addr(dev, idx, ha->addr);
2877 mc_ptr->dmi_addr);
2878 idx++; 3013 idx++;
2879 } else 3014 } else
2880 gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr); 3015 gfar_set_hash_for_addr(dev, ha->addr);
2881 } 3016 }
2882 } 3017 }
2883 3018
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 17d25e714236..ac4a92e08c09 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -262,6 +262,7 @@ extern const char gfar_driver_version[];
262 262
263#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size) 263#define next_bd(bdp, base, ring_size) skip_bd(bdp, 1, base, ring_size)
264 264
265#define RCTRL_TS_ENABLE 0x01000000
265#define RCTRL_PAL_MASK 0x001f0000 266#define RCTRL_PAL_MASK 0x001f0000
266#define RCTRL_VLEX 0x00002000 267#define RCTRL_VLEX 0x00002000
267#define RCTRL_FILREN 0x00001000 268#define RCTRL_FILREN 0x00001000
@@ -539,7 +540,7 @@ struct txbd8
539 540
540struct txfcb { 541struct txfcb {
541 u8 flags; 542 u8 flags;
542 u8 reserved; 543 u8 ptp; /* Flag to enable tx timestamping */
543 u8 l4os; /* Level 4 Header Offset */ 544 u8 l4os; /* Level 4 Header Offset */
544 u8 l3os; /* Level 3 Header Offset */ 545 u8 l3os; /* Level 3 Header Offset */
545 u16 phcs; /* Pseudo-header Checksum */ 546 u16 phcs; /* Pseudo-header Checksum */
@@ -885,6 +886,7 @@ struct gfar {
885#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100 886#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
886#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 887#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
887#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 888#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
889#define FSL_GIANFAR_DEV_HAS_TIMER 0x00000800
888 890
889#if (MAXGROUPS == 2) 891#if (MAXGROUPS == 2)
890#define DEFAULT_MAPPING 0xAA 892#define DEFAULT_MAPPING 0xAA
@@ -1100,6 +1102,10 @@ struct gfar_private {
1100 1102
1101 /* Network Statistics */ 1103 /* Network Statistics */
1102 struct gfar_extra_stats extra_stats; 1104 struct gfar_extra_stats extra_stats;
1105
1106 /* HW time stamping enabled flag */
1107 int hwts_rx_en;
1108 int hwts_tx_en;
1103}; 1109};
1104 1110
1105extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1]; 1111extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
index 3a90430de918..fd491e409488 100644
--- a/drivers/net/greth.c
+++ b/drivers/net/greth.c
@@ -895,7 +895,6 @@ static int greth_rx_gbit(struct net_device *dev, int limit)
895 else 895 else
896 skb->ip_summed = CHECKSUM_NONE; 896 skb->ip_summed = CHECKSUM_NONE;
897 897
898 skb->dev = dev;
899 skb->protocol = eth_type_trans(skb, dev); 898 skb->protocol = eth_type_trans(skb, dev);
900 dev->stats.rx_packets++; 899 dev->stats.rx_packets++;
901 netif_receive_skb(skb); 900 netif_receive_skb(skb);
@@ -990,7 +989,7 @@ static u32 greth_hash_get_index(__u8 *addr)
990 989
991static void greth_set_hash_filter(struct net_device *dev) 990static void greth_set_hash_filter(struct net_device *dev)
992{ 991{
993 struct dev_mc_list *curr; 992 struct netdev_hw_addr *ha;
994 struct greth_private *greth = netdev_priv(dev); 993 struct greth_private *greth = netdev_priv(dev);
995 struct greth_regs *regs = (struct greth_regs *) greth->regs; 994 struct greth_regs *regs = (struct greth_regs *) greth->regs;
996 u32 mc_filter[2]; 995 u32 mc_filter[2];
@@ -998,8 +997,8 @@ static void greth_set_hash_filter(struct net_device *dev)
998 997
999 mc_filter[0] = mc_filter[1] = 0; 998 mc_filter[0] = mc_filter[1] = 0;
1000 999
1001 netdev_for_each_mc_addr(curr, dev) { 1000 netdev_for_each_mc_addr(ha, dev) {
1002 bitnr = greth_hash_get_index(curr->dmi_addr); 1001 bitnr = greth_hash_get_index(ha->addr);
1003 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); 1002 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
1004 } 1003 }
1005 1004
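
greth folds its 64-entry hash into two 32-bit registers: bitnr >> 5 selects the word and bitnr & 31 the bit within it. A compilable sketch of the indexing (the hash value 37 is an arbitrary example):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t mc_filter[2] = { 0, 0 };
	unsigned int bitnr = 37;	/* any 6-bit hash value, 0..63 */

	mc_filter[bitnr >> 5] |= 1u << (bitnr & 31);	/* word, then bit */

	printf("hi=%08x lo=%08x\n",
	       (unsigned)mc_filter[1], (unsigned)mc_filter[0]);
	return 0;
}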
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 5d6f13879592..83f43bb835d6 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1857,12 +1857,12 @@ static void set_rx_mode(struct net_device *dev)
1857 /* Too many to match, or accept all multicasts. */ 1857 /* Too many to match, or accept all multicasts. */
1858 writew(0x000B, ioaddr + AddrMode); 1858 writew(0x000B, ioaddr + AddrMode);
1859 } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */ 1859 } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
1860 struct dev_mc_list *mclist; 1860 struct netdev_hw_addr *ha;
1861 int i = 0; 1861 int i = 0;
1862 1862
1863 netdev_for_each_mc_addr(mclist, dev) { 1863 netdev_for_each_mc_addr(ha, dev) {
1864 writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8); 1864 writel(*(u32 *)(ha->addr), ioaddr + 0x100 + i*8);
1865 writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]), 1865 writel(0x20000 | (*(u16 *)&ha->addr[4]),
1866 ioaddr + 0x104 + i*8); 1866 ioaddr + 0x104 + i*8);
1867 i++; 1867 i++;
1868 } 1868 }
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c
index 0cab992b3d1a..3e25f10cabd6 100644
--- a/drivers/net/hamradio/baycom_ser_fdx.c
+++ b/drivers/net/hamradio/baycom_ser_fdx.c
@@ -429,7 +429,7 @@ static int ser12_open(struct net_device *dev)
429 return -EINVAL; 429 return -EINVAL;
430 } 430 }
431 if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) { 431 if (!request_region(dev->base_addr, SER12_EXTENT, "baycom_ser_fdx")) {
432 printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy \n", 432 printk(KERN_WARNING "BAYCOM_SER_FSX: I/O port 0x%04lx busy\n",
433 dev->base_addr); 433 dev->base_addr);
434 return -EACCES; 434 return -EACCES;
435 } 435 }
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 4daad8cd56ea..0f3f6c2e6942 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -2099,15 +2099,15 @@ static void hp100_set_multicast_list(struct net_device *dev)
2099 } else { 2099 } else {
2100 int i, idx; 2100 int i, idx;
2101 u_char *addrs; 2101 u_char *addrs;
2102 struct dev_mc_list *dmi; 2102 struct netdev_hw_addr *ha;
2103 2103
2104 memset(&lp->hash_bytes, 0x00, 8); 2104 memset(&lp->hash_bytes, 0x00, 8);
2105#ifdef HP100_DEBUG 2105#ifdef HP100_DEBUG
2106 printk("hp100: %s: computing hash filter - mc_count = %i\n", 2106 printk("hp100: %s: computing hash filter - mc_count = %i\n",
2107 dev->name, netdev_mc_count(dev)); 2107 dev->name, netdev_mc_count(dev));
2108#endif 2108#endif
2109 netdev_for_each_mc_addr(dmi, dev) { 2109 netdev_for_each_mc_addr(ha, dev) {
2110 addrs = dmi->dmi_addr; 2110 addrs = ha->addr;
2111 if ((*addrs & 0x01) == 0x01) { /* multicast address? */ 2111 if ((*addrs & 0x01) == 0x01) { /* multicast address? */
2112#ifdef HP100_DEBUG 2112#ifdef HP100_DEBUG
2113 printk("hp100: %s: multicast = %pM, ", 2113 printk("hp100: %s: multicast = %pM, ",
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index dd873cc41c2b..2484e9e6c1ed 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -389,18 +389,19 @@ static void emac_hash_mc(struct emac_instance *dev)
389 const int regs = EMAC_XAHT_REGS(dev); 389 const int regs = EMAC_XAHT_REGS(dev);
390 u32 *gaht_base = emac_gaht_base(dev); 390 u32 *gaht_base = emac_gaht_base(dev);
391 u32 gaht_temp[regs]; 391 u32 gaht_temp[regs];
392 struct dev_mc_list *dmi; 392 struct netdev_hw_addr *ha;
393 int i; 393 int i;
394 394
395 DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev)); 395 DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
396 396
397 memset(gaht_temp, 0, sizeof (gaht_temp)); 397 memset(gaht_temp, 0, sizeof (gaht_temp));
398 398
399 netdev_for_each_mc_addr(dmi, dev->ndev) { 399 netdev_for_each_mc_addr(ha, dev->ndev) {
400 int slot, reg, mask; 400 int slot, reg, mask;
401 DBG2(dev, "mc %pM" NL, dmi->dmi_addr); 401 DBG2(dev, "mc %pM" NL, ha->addr);
402 402
403 slot = EMAC_XAHT_CRC_TO_SLOT(dev, ether_crc(ETH_ALEN, dmi->dmi_addr)); 403 slot = EMAC_XAHT_CRC_TO_SLOT(dev,
404 ether_crc(ETH_ALEN, ha->addr));
404 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot); 405 reg = EMAC_XAHT_SLOT_TO_REG(dev, slot);
405 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot); 406 mask = EMAC_XAHT_SLOT_TO_MASK(dev, slot);
406 407
@@ -1177,7 +1178,7 @@ static int emac_open(struct net_device *ndev)
1177 netif_carrier_on(dev->ndev); 1178 netif_carrier_on(dev->ndev);
1178 1179
1179 /* Required for Pause packet support in EMAC */ 1180 /* Required for Pause packet support in EMAC */
1180 dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1); 1181 dev_mc_add_global(ndev, default_mcast_addr);
1181 1182
1182 emac_configure(dev); 1183 emac_configure(dev);
1183 mal_poll_add(dev->mal, &dev->commac); 1184 mal_poll_add(dev->mal, &dev->commac);
@@ -1700,7 +1701,6 @@ static int emac_poll_rx(void *param, int budget)
1700 1701
1701 skb_put(skb, len); 1702 skb_put(skb, len);
1702 push_packet: 1703 push_packet:
1703 skb->dev = dev->ndev;
1704 skb->protocol = eth_type_trans(skb, dev->ndev); 1704 skb->protocol = eth_type_trans(skb, dev->ndev);
1705 emac_rx_csum(dev, skb, ctrl); 1705 emac_rx_csum(dev, skb, ctrl);
1706 1706
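
Besides the iterator rename, the ibm_newemac hunk moves from the old four-argument dev_mc_add() to the new address-list API, in which the length is implied by dev->addr_len and the old "glbl" flag becomes a dedicated entry point:

/* old: explicit length, global flag as the fourth argument */
dev_mc_add(ndev, default_mcast_addr, sizeof(default_mcast_addr), 1);
/* new: length taken from ndev->addr_len, "global" is in the name */
dev_mc_add_global(ndev, default_mcast_addr);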
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 7d6cf3340c11..294ccfb427cf 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -384,7 +384,7 @@ static void InitBoard(struct net_device *dev)
384 int camcnt; 384 int camcnt;
385 camentry_t cams[16]; 385 camentry_t cams[16];
386 u32 cammask; 386 u32 cammask;
387 struct dev_mc_list *mcptr; 387 struct netdev_hw_addr *ha;
388 u16 rcrval; 388 u16 rcrval;
389 389
390 /* reset the SONIC */ 390 /* reset the SONIC */
@@ -419,8 +419,8 @@ static void InitBoard(struct net_device *dev)
419 /* start putting the multicast addresses into the CAM list. Stop if 419 /* start putting the multicast addresses into the CAM list. Stop if
420 it is full. */ 420 it is full. */
421 421
422 netdev_for_each_mc_addr(mcptr, dev) { 422 netdev_for_each_mc_addr(ha, dev) {
423 putcam(cams, &camcnt, mcptr->dmi_addr); 423 putcam(cams, &camcnt, ha->addr);
424 if (camcnt == 16) 424 if (camcnt == 16)
425 break; 425 break;
426 } 426 }
@@ -478,7 +478,7 @@ static void InitBoard(struct net_device *dev)
478 /* if still multicast addresses left or ALLMULTI is set, set the multicast 478 /* if still multicast addresses left or ALLMULTI is set, set the multicast
479 enable bit */ 479 enable bit */
480 480
481 if ((dev->flags & IFF_ALLMULTI) || (mcptr != NULL)) 481 if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
482 rcrval |= RCREG_AMC; 482 rcrval |= RCREG_AMC;
483 483
 484 /* promiscuous mode? */ 484 /* promiscuous mode? */
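
The ibmlana change to the CAM-overflow test is a subtle consequence of the new iterator: netdev_for_each_mc_addr() leaves its cursor pointing at the list head after a full traversal, never at NULL, so "did we break out early?" can no longer be read off the cursor as it could with the old dev_mc_list chain. Comparing netdev_mc_count() against the number of entries consumed is the idiomatic replacement; a sketch with hypothetical foo_ helpers and FOO_CAM_SIZE standing in for the 16-entry CAM:

static void foo_load_cam(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	int camcnt = 0;

	netdev_for_each_mc_addr(ha, dev) {
		foo_putcam(dev, camcnt++, ha->addr);
		if (camcnt == FOO_CAM_SIZE)
			break;	/* CAM exhausted */
	}

	/* overflow is detected by count, not by inspecting 'ha' */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > camcnt)
		foo_accept_all_multicast(dev);
}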
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index cd508a8ee25b..0d2c3ac2005e 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1073,7 +1073,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1073 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc); 1073 ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
1074 } 1074 }
1075 } else { 1075 } else {
1076 struct dev_mc_list *mclist; 1076 struct netdev_hw_addr *ha;
1077 /* clear the filter table & disable filtering */ 1077 /* clear the filter table & disable filtering */
1078 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1078 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1079 IbmVethMcastEnableRecv | 1079 IbmVethMcastEnableRecv |
@@ -1084,10 +1084,10 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
1084 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc); 1084 ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
1085 } 1085 }
1086 /* add the addresses to the filter table */ 1086 /* add the addresses to the filter table */
1087 netdev_for_each_mc_addr(mclist, netdev) { 1087 netdev_for_each_mc_addr(ha, netdev) {
1088 // add the multicast address to the filter table 1088 // add the multicast address to the filter table
1089 unsigned long mcast_addr = 0; 1089 unsigned long mcast_addr = 0;
1090 memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6); 1090 memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
1091 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, 1091 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1092 IbmVethMcastAddFilter, 1092 IbmVethMcastAddFilter,
1093 mcast_addr); 1093 mcast_addr);
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index 4a32bed77c71..3ef495537dc5 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -104,6 +104,12 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
104 case E1000_DEV_ID_82580_COPPER_DUAL: 104 case E1000_DEV_ID_82580_COPPER_DUAL:
105 mac->type = e1000_82580; 105 mac->type = e1000_82580;
106 break; 106 break;
107 case E1000_DEV_ID_I350_COPPER:
108 case E1000_DEV_ID_I350_FIBER:
109 case E1000_DEV_ID_I350_SERDES:
110 case E1000_DEV_ID_I350_SGMII:
111 mac->type = e1000_i350;
112 break;
107 default: 113 default:
108 return -E1000_ERR_MAC_INIT; 114 return -E1000_ERR_MAC_INIT;
109 break; 115 break;
@@ -153,8 +159,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
153 mac->rar_entry_count = E1000_RAR_ENTRIES_82576; 159 mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
154 if (mac->type == e1000_82580) 160 if (mac->type == e1000_82580)
155 mac->rar_entry_count = E1000_RAR_ENTRIES_82580; 161 mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
162 if (mac->type == e1000_i350)
163 mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
156 /* reset */ 164 /* reset */
157 if (mac->type == e1000_82580) 165 if (mac->type >= e1000_82580)
158 mac->ops.reset_hw = igb_reset_hw_82580; 166 mac->ops.reset_hw = igb_reset_hw_82580;
159 else 167 else
160 mac->ops.reset_hw = igb_reset_hw_82575; 168 mac->ops.reset_hw = igb_reset_hw_82575;
@@ -225,7 +233,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
225 phy->ops.reset = igb_phy_hw_reset_sgmii_82575; 233 phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
226 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575; 234 phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
227 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575; 235 phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
228 } else if (hw->mac.type == e1000_82580) { 236 } else if (hw->mac.type >= e1000_82580) {
229 phy->ops.reset = igb_phy_hw_reset; 237 phy->ops.reset = igb_phy_hw_reset;
230 phy->ops.read_reg = igb_read_phy_reg_82580; 238 phy->ops.read_reg = igb_read_phy_reg_82580;
231 phy->ops.write_reg = igb_write_phy_reg_82580; 239 phy->ops.write_reg = igb_write_phy_reg_82580;
@@ -261,6 +269,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
261 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state; 269 phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
262 break; 270 break;
263 case I82580_I_PHY_ID: 271 case I82580_I_PHY_ID:
272 case I350_I_PHY_ID:
264 phy->type = e1000_phy_82580; 273 phy->type = e1000_phy_82580;
265 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580; 274 phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_82580;
266 phy->ops.get_cable_length = igb_get_cable_length_82580; 275 phy->ops.get_cable_length = igb_get_cable_length_82580;
@@ -1445,7 +1454,6 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
1445 **/ 1454 **/
1446static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data) 1455static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1447{ 1456{
1448 u32 mdicnfg = 0;
1449 s32 ret_val; 1457 s32 ret_val;
1450 1458
1451 1459
@@ -1453,15 +1461,6 @@ static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
1453 if (ret_val) 1461 if (ret_val)
1454 goto out; 1462 goto out;
1455 1463
1456 /*
1457 * We config the phy address in MDICNFG register now. Same bits
1458 * as before. The values in MDIC can be written but will be
1459 * ignored. This allows us to call the old function after
1460 * configuring the PHY address in the new register
1461 */
1462 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1463 wr32(E1000_MDICNFG, mdicnfg);
1464
1465 ret_val = igb_read_phy_reg_mdic(hw, offset, data); 1464 ret_val = igb_read_phy_reg_mdic(hw, offset, data);
1466 1465
1467 hw->phy.ops.release(hw); 1466 hw->phy.ops.release(hw);
@@ -1480,7 +1479,6 @@ out:
1480 **/ 1479 **/
1481static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data) 1480static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1482{ 1481{
1483 u32 mdicnfg = 0;
1484 s32 ret_val; 1482 s32 ret_val;
1485 1483
1486 1484
@@ -1488,15 +1486,6 @@ static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
1488 if (ret_val) 1486 if (ret_val)
1489 goto out; 1487 goto out;
1490 1488
1491 /*
1492 * We config the phy address in MDICNFG register now. Same bits
1493 * as before. The values in MDIC can be written but will be
1494 * ignored. This allows us to call the old function after
1495 * configuring the PHY address in the new register
1496 */
1497 mdicnfg = (hw->phy.addr << E1000_MDIC_PHY_SHIFT);
1498 wr32(E1000_MDICNFG, mdicnfg);
1499
1500 ret_val = igb_write_phy_reg_mdic(hw, offset, data); 1489 ret_val = igb_write_phy_reg_mdic(hw, offset, data);
1501 1490
1502 hw->phy.ops.release(hw); 1491 hw->phy.ops.release(hw);
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index fbe1c99c193c..cbd1e1259e4d 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -38,9 +38,10 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
38 (ID_LED_DEF1_DEF2 << 4) | \ 38 (ID_LED_DEF1_DEF2 << 4) | \
39 (ID_LED_OFF1_ON2)) 39 (ID_LED_OFF1_ON2))
40 40
41#define E1000_RAR_ENTRIES_82575 16 41#define E1000_RAR_ENTRIES_82575 16
42#define E1000_RAR_ENTRIES_82576 24 42#define E1000_RAR_ENTRIES_82576 24
43#define E1000_RAR_ENTRIES_82580 24 43#define E1000_RAR_ENTRIES_82580 24
44#define E1000_RAR_ENTRIES_I350 32
44 45
45#define E1000_SW_SYNCH_MB 0x00000100 46#define E1000_SW_SYNCH_MB 0x00000100
46#define E1000_STAT_DEV_RST_SET 0x00100000 47#define E1000_STAT_DEV_RST_SET 0x00100000
@@ -52,6 +53,7 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
52#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 53#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
53#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 54#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
54#define E1000_SRRCTL_DROP_EN 0x80000000 55#define E1000_SRRCTL_DROP_EN 0x80000000
56#define E1000_SRRCTL_TIMESTAMP 0x40000000
55 57
56#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002 58#define E1000_MRQC_ENABLE_RSS_4Q 0x00000002
57#define E1000_MRQC_ENABLE_VMDQ 0x00000003 59#define E1000_MRQC_ENABLE_VMDQ 0x00000003
@@ -108,6 +110,7 @@ union e1000_adv_rx_desc {
108#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 110#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
109#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 111#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
110#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */ 112#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
113#define E1000_RXDADV_STAT_TSIP 0x08000 /* timestamp in packet */
111 114
112/* Transmit Descriptor - Advanced */ 115/* Transmit Descriptor - Advanced */
113union e1000_adv_tx_desc { 116union e1000_adv_tx_desc {
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index fe6cf1b696c7..31d24e0e76de 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -629,6 +629,7 @@
629#define M88E1111_I_PHY_ID 0x01410CC0 629#define M88E1111_I_PHY_ID 0x01410CC0
630#define IGP03E1000_E_PHY_ID 0x02A80390 630#define IGP03E1000_E_PHY_ID 0x02A80390
631#define I82580_I_PHY_ID 0x015403A0 631#define I82580_I_PHY_ID 0x015403A0
632#define I350_I_PHY_ID 0x015403B0
632#define M88_VENDOR 0x0141 633#define M88_VENDOR 0x0141
633 634
634/* M88E1000 Specific Registers */ 635/* M88E1000 Specific Registers */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 82a533f5192a..cb8db78b1a05 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -31,6 +31,7 @@
31#include <linux/types.h> 31#include <linux/types.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/io.h> 33#include <linux/io.h>
34#include <linux/netdevice.h>
34 35
35#include "e1000_regs.h" 36#include "e1000_regs.h"
36#include "e1000_defines.h" 37#include "e1000_defines.h"
@@ -53,6 +54,10 @@ struct e1000_hw;
53#define E1000_DEV_ID_82580_SERDES 0x1510 54#define E1000_DEV_ID_82580_SERDES 0x1510
54#define E1000_DEV_ID_82580_SGMII 0x1511 55#define E1000_DEV_ID_82580_SGMII 0x1511
55#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
57#define E1000_DEV_ID_I350_COPPER 0x1521
58#define E1000_DEV_ID_I350_FIBER 0x1522
59#define E1000_DEV_ID_I350_SERDES 0x1523
60#define E1000_DEV_ID_I350_SGMII 0x1524
56 61
57#define E1000_REVISION_2 2 62#define E1000_REVISION_2 2
58#define E1000_REVISION_4 4 63#define E1000_REVISION_4 4
@@ -72,6 +77,7 @@ enum e1000_mac_type {
72 e1000_82575, 77 e1000_82575,
73 e1000_82576, 78 e1000_82576,
74 e1000_82580, 79 e1000_82580,
80 e1000_i350,
75 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ 81 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
76}; 82};
77 83
@@ -502,14 +508,11 @@ struct e1000_hw {
502 u8 revision_id; 508 u8 revision_id;
503}; 509};
504 510
505#ifdef DEBUG 511extern struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
506extern char *igb_get_hw_dev_name(struct e1000_hw *hw);
507#define hw_dbg(format, arg...) \ 512#define hw_dbg(format, arg...) \
508 printk(KERN_DEBUG "%s: " format, igb_get_hw_dev_name(hw), ##arg) 513 netdev_dbg(igb_get_hw_dev(hw), format, ##arg)
509#else 514
510#define hw_dbg(format, arg...)
511#endif
512#endif
513/* These functions must be implemented by drivers */ 515/* These functions must be implemented by drivers */
514s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 516s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
515s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 517s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
518#endif /* _E1000_HW_H_ */
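
Dropping the DEBUG-only string accessor and routing hw_dbg() through netdev_dbg() means igb's MAC-layer debug messages now flow through the standard dynamic-debug machinery and carry the netdev name automatically. In effect a call site such as this one (the message text is an arbitrary example):

hw_dbg("link negotiation did not complete\n");

now expands, via igb_get_hw_dev(), to roughly:

netdev_dbg(adapter->netdev, "link negotiation did not complete\n");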
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 3b772b822a5d..7d288ccca1ca 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -107,6 +107,7 @@ struct vf_data_storage {
107#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 107#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
108 108
109/* Supported Rx Buffer Sizes */ 109/* Supported Rx Buffer Sizes */
110#define IGB_RXBUFFER_64 64 /* Used for packet split */
110#define IGB_RXBUFFER_128 128 /* Used for packet split */ 111#define IGB_RXBUFFER_128 128 /* Used for packet split */
111#define IGB_RXBUFFER_1024 1024 112#define IGB_RXBUFFER_1024 1024
112#define IGB_RXBUFFER_2048 2048 113#define IGB_RXBUFFER_2048 2048
@@ -323,6 +324,7 @@ struct igb_adapter {
323 324
324#define IGB_82576_TSYNC_SHIFT 19 325#define IGB_82576_TSYNC_SHIFT 19
325#define IGB_82580_TSYNC_SHIFT 24 326#define IGB_82580_TSYNC_SHIFT 24
327#define IGB_TS_HDR_LEN 16
326enum e1000_state_t { 328enum e1000_state_t {
327 __IGB_TESTING, 329 __IGB_TESTING,
328 __IGB_RESETTING, 330 __IGB_RESETTING,
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index 743038490104..1b8fd7f4064d 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -902,6 +902,49 @@ struct igb_reg_test {
902#define TABLE64_TEST_LO 5 902#define TABLE64_TEST_LO 5
903#define TABLE64_TEST_HI 6 903#define TABLE64_TEST_HI 6
904 904
905/* i350 reg test */
906static struct igb_reg_test reg_test_i350[] = {
907 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
908 { E1000_FCAH, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
909 { E1000_FCT, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0xFFFFFFFF },
910 { E1000_VET, 0x100, 1, PATTERN_TEST, 0xFFFF0000, 0xFFFF0000 },
911 { E1000_RDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
912 { E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
913 { E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
914 { E1000_RDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
915 { E1000_RDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
916 { E1000_RDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
917 /* RDH is read-only for i350, only test RDT. */
918 { E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
919 { E1000_RDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
920 { E1000_FCRTH, 0x100, 1, PATTERN_TEST, 0x0000FFF0, 0x0000FFF0 },
921 { E1000_FCTTV, 0x100, 1, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
922 { E1000_TIPG, 0x100, 1, PATTERN_TEST, 0x3FFFFFFF, 0x3FFFFFFF },
923 { E1000_TDBAL(0), 0x100, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
924 { E1000_TDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
925 { E1000_TDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
926 { E1000_TDBAL(4), 0x40, 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
927 { E1000_TDBAH(4), 0x40, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
928 { E1000_TDLEN(4), 0x40, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
929 { E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
930 { E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
931 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
932 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
933 { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
934 { E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
935 { E1000_RA, 0, 16, TABLE64_TEST_LO,
936 0xFFFFFFFF, 0xFFFFFFFF },
937 { E1000_RA, 0, 16, TABLE64_TEST_HI,
938 0xC3FFFFFF, 0xFFFFFFFF },
939 { E1000_RA2, 0, 16, TABLE64_TEST_LO,
940 0xFFFFFFFF, 0xFFFFFFFF },
941 { E1000_RA2, 0, 16, TABLE64_TEST_HI,
942 0xC3FFFFFF, 0xFFFFFFFF },
943 { E1000_MTA, 0, 128, TABLE32_TEST,
944 0xFFFFFFFF, 0xFFFFFFFF },
945 { 0, 0, 0, 0 }
946};
947
905/* 82580 reg test */ 948/* 82580 reg test */
906static struct igb_reg_test reg_test_82580[] = { 949static struct igb_reg_test reg_test_82580[] = {
907 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 950 { E1000_FCAL, 0x100, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1077,6 +1120,10 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
1077 u32 i, toggle; 1120 u32 i, toggle;
1078 1121
1079 switch (adapter->hw.mac.type) { 1122 switch (adapter->hw.mac.type) {
1123 case e1000_i350:
1124 test = reg_test_i350;
1125 toggle = 0x7FEFF3FF;
1126 break;
1080 case e1000_82580: 1127 case e1000_82580:
1081 test = reg_test_82580; 1128 test = reg_test_82580;
1082 toggle = 0x7FEFF3FF; 1129 toggle = 0x7FEFF3FF;
@@ -1238,6 +1285,9 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1238 case e1000_82580: 1285 case e1000_82580:
1239 ics_mask = 0x77DCFED5; 1286 ics_mask = 0x77DCFED5;
1240 break; 1287 break;
1288 case e1000_i350:
1289 ics_mask = 0x77DCFED5;
1290 break;
1241 default: 1291 default:
1242 ics_mask = 0x7FFFFFFF; 1292 ics_mask = 0x7FFFFFFF;
1243 break; 1293 break;
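For readers following the test tables: igb_reg_test() walks each entry and, for PATTERN_TEST entries, writes known patterns through the entry's write mask and compares the readback against the entry's read mask. A minimal sketch of that check, assuming a hypothetical helper name reg_pattern_check() and pattern set (the driver's real loop may differ; the two trailing columns of the tables above are taken to be the read mask and write mask):

	/* Sketch: exercise one PATTERN_TEST entry. 't->mask' is the set of
	 * testable read bits, 't->write' the set of writable bits. */
	static const u32 patterns[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};

	static bool reg_pattern_check(struct e1000_hw *hw,
				      const struct igb_reg_test *t, u32 reg)
	{
		int i;

		for (i = 0; i < ARRAY_SIZE(patterns); i++) {
			u32 wr = patterns[i] & t->write;

			wr32(reg, wr);
			if ((rd32(reg) & t->mask) != (wr & t->mask))
				return false;	/* a testable bit is stuck */
		}
		return true;
	}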
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c9baa2aa98cd..c19b1e0caecd 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -62,6 +62,10 @@ static const struct e1000_info *igb_info_tbl[] = {
62}; 62};
63 63
64static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = { 64static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 }, 69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 }, 70 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 }, 71 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -223,43 +227,17 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
223 return stamp; 227 return stamp;
224} 228}
225 229
226#ifdef DEBUG
227/** 230/**
228 * igb_get_hw_dev_name - return device name string 231 * igb_get_hw_dev - return device
229 * used by hardware layer to print debugging information 232 * used by hardware layer to print debugging information
230 **/ 233 **/
231char *igb_get_hw_dev_name(struct e1000_hw *hw) 234struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
232{ 235{
233 struct igb_adapter *adapter = hw->back; 236 struct igb_adapter *adapter = hw->back;
234 return adapter->netdev->name; 237 return adapter->netdev;
235} 238}
236 239
237/** 240/**
238 * igb_get_time_str - format current NIC and system time as string
239 */
240static char *igb_get_time_str(struct igb_adapter *adapter,
241 char buffer[160])
242{
243 cycle_t hw = adapter->cycles.read(&adapter->cycles);
244 struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
245 struct timespec sys;
246 struct timespec delta;
247 getnstimeofday(&sys);
248
249 delta = timespec_sub(nic, sys);
250
251 sprintf(buffer,
252 "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
253 hw,
254 (long)nic.tv_sec, nic.tv_nsec,
255 (long)sys.tv_sec, sys.tv_nsec,
256 (long)delta.tv_sec, delta.tv_nsec);
257
258 return buffer;
259}
260#endif
261
262/**
263 * igb_init_module - Driver Registration Routine 241 * igb_init_module - Driver Registration Routine
264 * 242 *
265 * igb_init_module is the first routine called when the driver is 243 * igb_init_module is the first routine called when the driver is
@@ -328,6 +306,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
328 } 306 }
329 case e1000_82575: 307 case e1000_82575:
330 case e1000_82580: 308 case e1000_82580:
309 case e1000_i350:
331 default: 310 default:
332 for (; i < adapter->num_rx_queues; i++) 311 for (; i < adapter->num_rx_queues; i++)
333 adapter->rx_ring[i]->reg_idx = rbase_offset + i; 312 adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -471,6 +450,7 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
471 q_vector->eims_value = 1 << msix_vector; 450 q_vector->eims_value = 1 << msix_vector;
472 break; 451 break;
473 case e1000_82580: 452 case e1000_82580:
453 case e1000_i350:
474 /* 82580 uses the same table-based approach as 82576 but has fewer 454 /* 82580 uses the same table-based approach as 82576 but has fewer
475 entries as a result we carry over for queues greater than 4. */ 455 entries as a result we carry over for queues greater than 4. */
476 if (rx_queue > IGB_N0_QUEUE) { 456 if (rx_queue > IGB_N0_QUEUE) {
@@ -551,6 +531,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
551 531
552 case e1000_82576: 532 case e1000_82576:
553 case e1000_82580: 533 case e1000_82580:
534 case e1000_i350:
554 /* Turn on MSI-X capability first, or our settings 535 /* Turn on MSI-X capability first, or our settings
555 * won't stick. And it will take days to debug. */ 536 * won't stick. And it will take days to debug. */
556 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE | 537 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@ -1253,6 +1234,7 @@ void igb_reset(struct igb_adapter *adapter)
1253 * To take effect CTRL.RST is required. 1234 * To take effect CTRL.RST is required.
1254 */ 1235 */
1255 switch (mac->type) { 1236 switch (mac->type) {
1237 case e1000_i350:
1256 case e1000_82580: 1238 case e1000_82580:
1257 pba = rd32(E1000_RXPBS); 1239 pba = rd32(E1000_RXPBS);
1258 pba = igb_rxpbs_adjust_82580(pba); 1240 pba = igb_rxpbs_adjust_82580(pba);
@@ -1826,6 +1808,7 @@ static void igb_init_hw_timer(struct igb_adapter *adapter)
1826 struct e1000_hw *hw = &adapter->hw; 1808 struct e1000_hw *hw = &adapter->hw;
1827 1809
1828 switch (hw->mac.type) { 1810 switch (hw->mac.type) {
1811 case e1000_i350:
1829 case e1000_82580: 1812 case e1000_82580:
1830 memset(&adapter->cycles, 0, sizeof(adapter->cycles)); 1813 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1831 adapter->cycles.read = igb_read_clock; 1814 adapter->cycles.read = igb_read_clock;
@@ -2339,6 +2322,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
2339 if (adapter->vfs_allocated_count) { 2322 if (adapter->vfs_allocated_count) {
2340 /* 82575 and 82576 supports 2 RSS queues for VMDq */ 2323 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2341 switch (hw->mac.type) { 2324 switch (hw->mac.type) {
2325 case e1000_i350:
2342 case e1000_82580: 2326 case e1000_82580:
2343 num_rx_queues = 1; 2327 num_rx_queues = 1;
2344 shift = 0; 2328 shift = 0;
@@ -2590,6 +2574,8 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
2590 E1000_SRRCTL_BSIZEPKT_SHIFT; 2574 E1000_SRRCTL_BSIZEPKT_SHIFT;
2591 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF; 2575 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2592 } 2576 }
2577 if (hw->mac.type == e1000_82580)
2578 srrctl |= E1000_SRRCTL_TIMESTAMP;
2593 /* Only set Drop Enable if we are supporting multiple queues */ 2579 /* Only set Drop Enable if we are supporting multiple queues */
2594 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1) 2580 if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
2595 srrctl |= E1000_SRRCTL_DROP_EN; 2581 srrctl |= E1000_SRRCTL_DROP_EN;
@@ -2876,7 +2862,7 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
2876{ 2862{
2877 struct igb_adapter *adapter = netdev_priv(netdev); 2863 struct igb_adapter *adapter = netdev_priv(netdev);
2878 struct e1000_hw *hw = &adapter->hw; 2864 struct e1000_hw *hw = &adapter->hw;
2879 struct dev_mc_list *mc_ptr; 2865 struct netdev_hw_addr *ha;
2880 u8 *mta_list; 2866 u8 *mta_list;
2881 int i; 2867 int i;
2882 2868
@@ -2893,8 +2879,8 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
2893 2879
2894 /* The shared function expects a packed array of only addresses. */ 2880 /* The shared function expects a packed array of only addresses. */
2895 i = 0; 2881 i = 0;
2896 netdev_for_each_mc_addr(mc_ptr, netdev) 2882 netdev_for_each_mc_addr(ha, netdev)
2897 memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); 2883 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
2898 2884
2899 igb_update_mc_addr_list(hw, mta_list, i); 2885 igb_update_mc_addr_list(hw, mta_list, i);
2900 kfree(mta_list); 2886 kfree(mta_list);
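All of the dev_mc_list removals in this patch convert to the same netdev_hw_addr idiom, so the pattern is worth isolating once. A self-contained sketch (the function name is hypothetical; the body mirrors the converted hunk above):

	/* Sketch: pack a device's multicast addresses into a flat array;
	 * ha->addr replaces the old mc_ptr->dmi_addr field. */
	static int pack_mc_addrs(struct net_device *netdev, u8 *mta_list)
	{
		struct netdev_hw_addr *ha;
		int i = 0;

		netdev_for_each_mc_addr(ha, netdev)
			memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

		return i;	/* number of addresses packed */
	}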
@@ -3920,6 +3906,9 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3920 * i.e. RXBUFFER_2048 --> size-4096 slab 3906 * i.e. RXBUFFER_2048 --> size-4096 slab
3921 */ 3907 */
3922 3908
3909 if (adapter->hw.mac.type == e1000_82580)
3910 max_frame += IGB_TS_HDR_LEN;
3911
3923 if (max_frame <= IGB_RXBUFFER_1024) 3912 if (max_frame <= IGB_RXBUFFER_1024)
3924 rx_buffer_len = IGB_RXBUFFER_1024; 3913 rx_buffer_len = IGB_RXBUFFER_1024;
3925 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE) 3914 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
@@ -3927,6 +3916,14 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3927 else 3916 else
3928 rx_buffer_len = IGB_RXBUFFER_128; 3917 rx_buffer_len = IGB_RXBUFFER_128;
3929 3918
3919 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
3920 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
3921 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
3922
3923 if ((adapter->hw.mac.type == e1000_82580) &&
3924 (rx_buffer_len == IGB_RXBUFFER_128))
3925 rx_buffer_len += IGB_RXBUFFER_64;
3926
3930 if (netif_running(netdev)) 3927 if (netif_running(netdev))
3931 igb_down(adapter); 3928 igb_down(adapter);
3932 3929
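The special cases above are pure frame arithmetic. A worked example, assuming the usual constants (ETH_HLEN = 14, ETH_FCS_LEN = 4, IGB_TS_HDR_LEN = 16):

	/* MTU 1500 on 82580 with per-packet timestamps:
	 *   max_frame = 1500 + ETH_HLEN + ETH_FCS_LEN + IGB_TS_HDR_LEN
	 *             = 1500 + 14 + 4 + 16 = 1534
	 *             = ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN
	 * so the first new check matches and rx_buffer_len becomes
	 * MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN = 1522 + 16 = 1538,
	 * leaving room for the prepended timestamp header. */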
@@ -5143,7 +5140,7 @@ static inline void igb_rx_checksum_adv(struct igb_ring *ring,
5143 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err); 5140 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
5144} 5141}
5145 5142
5146static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr, 5143static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5147 struct sk_buff *skb) 5144 struct sk_buff *skb)
5148{ 5145{
5149 struct igb_adapter *adapter = q_vector->adapter; 5146 struct igb_adapter *adapter = q_vector->adapter;
@@ -5161,13 +5158,18 @@ static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
5161 * If nothing went wrong, then it should have a skb_shared_tx that we 5158 * If nothing went wrong, then it should have a skb_shared_tx that we
5162 * can turn into a skb_shared_hwtstamps. 5159 * can turn into a skb_shared_hwtstamps.
5163 */ 5160 */
5164 if (likely(!(staterr & E1000_RXDADV_STAT_TS))) 5161 if (staterr & E1000_RXDADV_STAT_TSIP) {
5165 return; 5162 u32 *stamp = (u32 *)skb->data;
5166 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) 5163 regval = le32_to_cpu(*(stamp + 2));
5167 return; 5164 regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
5165 skb_pull(skb, IGB_TS_HDR_LEN);
5166 } else {
5167 if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
5168 return;
5168 5169
5169 regval = rd32(E1000_RXSTMPL); 5170 regval = rd32(E1000_RXSTMPL);
5170 regval |= (u64)rd32(E1000_RXSTMPH) << 32; 5171 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
5172 }
5171 5173
5172 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); 5174 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
5173} 5175}
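The hunk above adds the in-band timestamp path: when E1000_RXDADV_STAT_TSIP is set, the MAC has prepended an IGB_TS_HDR_LEN-byte header to the frame, and the 64-bit SYSTIM value lives in its third and fourth little-endian 32-bit words. Reduced to the extraction alone (a sketch of the same steps, not additional driver code):

	/* Sketch: pull the 64-bit timestamp out of the prepended header,
	 * then strip the header so the stack sees an ordinary frame. */
	u32 *stamp = (u32 *)skb->data;
	u64 regval;

	regval  = le32_to_cpu(stamp[2]);		/* SYSTIM low */
	regval |= (u64)le32_to_cpu(stamp[3]) << 32;	/* SYSTIM high */
	skb_pull(skb, IGB_TS_HDR_LEN);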
@@ -5275,7 +5277,8 @@ send_up:
5275 goto next_desc; 5277 goto next_desc;
5276 } 5278 }
5277 5279
5278 igb_rx_hwtstamp(q_vector, staterr, skb); 5280 if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
5281 igb_rx_hwtstamp(q_vector, staterr, skb);
5279 total_bytes += skb->len; 5282 total_bytes += skb->len;
5280 total_packets++; 5283 total_packets++;
5281 5284
@@ -5555,6 +5558,16 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
5555 return 0; 5558 return 0;
5556 } 5559 }
5557 5560
5561 /*
5562 * Per-packet timestamping only works if all packets are
5563 * timestamped, so enable timestamping in all packets as
5564 * long as one rx filter was configured.
5565 */
5566 if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
5567 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
5568 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
5569 }
5570
5558 /* enable/disable TX */ 5571 /* enable/disable TX */
5559 regval = rd32(E1000_TSYNCTXCTL); 5572 regval = rd32(E1000_TSYNCTXCTL);
5560 regval &= ~E1000_TSYNCTXCTL_ENABLED; 5573 regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@ -6131,19 +6144,25 @@ static void igb_vmm_control(struct igb_adapter *adapter)
6131 struct e1000_hw *hw = &adapter->hw; 6144 struct e1000_hw *hw = &adapter->hw;
6132 u32 reg; 6145 u32 reg;
6133 6146
6134 /* replication is not supported for 82575 */ 6147 switch (hw->mac.type) {
6135 if (hw->mac.type == e1000_82575) 6148 case e1000_82575:
6149 default:
6150 /* replication is not supported for 82575 */
6136 return; 6151 return;
6137 6152 case e1000_82576:
6138 /* enable replication vlan tag stripping */ 6153 /* notify HW that the MAC is adding vlan tags */
6139 reg = rd32(E1000_RPLOLR); 6154 reg = rd32(E1000_DTXCTL);
6140 reg |= E1000_RPLOLR_STRVLAN; 6155 reg |= E1000_DTXCTL_VLAN_ADDED;
6141 wr32(E1000_RPLOLR, reg); 6156 wr32(E1000_DTXCTL, reg);
6142 6157 case e1000_82580:
6143 /* notify HW that the MAC is adding vlan tags */ 6158 /* enable replication vlan tag stripping */
6144 reg = rd32(E1000_DTXCTL); 6159 reg = rd32(E1000_RPLOLR);
6145 reg |= E1000_DTXCTL_VLAN_ADDED; 6160 reg |= E1000_RPLOLR_STRVLAN;
6146 wr32(E1000_DTXCTL, reg); 6161 wr32(E1000_RPLOLR, reg);
6162 case e1000_i350:
6163 /* none of the above registers are supported by i350 */
6164 break;
6165 }
6147 6166
6148 if (adapter->vfs_allocated_count) { 6167 if (adapter->vfs_allocated_count) {
6149 igb_vmdq_set_loopback_pf(hw, true); 6168 igb_vmdq_set_loopback_pf(hw, true);
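The rewritten igb_vmm_control() leans on deliberate switch fall-through: execution enters at the matching MAC type and runs every write below it, so each generation programs only the registers it actually has. The shape of the construct, as a sketch (helper names are hypothetical):

	switch (hw->mac.type) {
	case e1000_82575:
	default:
		return;				/* no replication support */
	case e1000_82576:
		set_dtxctl_vlan_added(hw);	/* DTXCTL written from 82576 on */
		/* fall through */
	case e1000_82580:
		set_rplolr_strvlan(hw);		/* RPLOLR written from 82580 on */
		/* fall through */
	case e1000_i350:
		break;				/* i350 has neither register */
	}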
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 1b1edad1eb5e..cc2309027e6a 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1398,7 +1398,7 @@ static void igbvf_set_multi(struct net_device *netdev)
1398{ 1398{
1399 struct igbvf_adapter *adapter = netdev_priv(netdev); 1399 struct igbvf_adapter *adapter = netdev_priv(netdev);
1400 struct e1000_hw *hw = &adapter->hw; 1400 struct e1000_hw *hw = &adapter->hw;
1401 struct dev_mc_list *mc_ptr; 1401 struct netdev_hw_addr *ha;
1402 u8 *mta_list = NULL; 1402 u8 *mta_list = NULL;
1403 int i; 1403 int i;
1404 1404
@@ -1413,8 +1413,8 @@ static void igbvf_set_multi(struct net_device *netdev)
1413 1413
1414 /* prepare a packed array of only addresses. */ 1414 /* prepare a packed array of only addresses. */
1415 i = 0; 1415 i = 0;
1416 netdev_for_each_mc_addr(mc_ptr, netdev) 1416 netdev_for_each_mc_addr(ha, netdev)
1417 memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN); 1417 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
1418 1418
1419 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0); 1419 hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
1420 kfree(mta_list); 1420 kfree(mta_list);
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8f6197d647c0..091ea3377ed0 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1665,7 +1665,7 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1665 1665
1666static void ioc3_set_multicast_list(struct net_device *dev) 1666static void ioc3_set_multicast_list(struct net_device *dev)
1667{ 1667{
1668 struct dev_mc_list *dmi; 1668 struct netdev_hw_addr *ha;
1669 struct ioc3_private *ip = netdev_priv(dev); 1669 struct ioc3_private *ip = netdev_priv(dev);
1670 struct ioc3 *ioc3 = ip->regs; 1670 struct ioc3 *ioc3 = ip->regs;
1671 u64 ehar = 0; 1671 u64 ehar = 0;
@@ -1689,8 +1689,8 @@ static void ioc3_set_multicast_list(struct net_device *dev)
1689 ip->ehar_h = 0xffffffff; 1689 ip->ehar_h = 0xffffffff;
1690 ip->ehar_l = 0xffffffff; 1690 ip->ehar_l = 0xffffffff;
1691 } else { 1691 } else {
1692 netdev_for_each_mc_addr(dmi, dev) { 1692 netdev_for_each_mc_addr(ha, dev) {
1693 char *addr = dmi->dmi_addr; 1693 char *addr = ha->addr;
1694 1694
1695 if (!(*addr & 1)) 1695 if (!(*addr & 1))
1696 continue; 1696 continue;
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 639bf9fb0279..72e3d2da9e9f 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -570,7 +570,7 @@ static int ipg_config_autoneg(struct net_device *dev)
570static void ipg_nic_set_multicast_list(struct net_device *dev) 570static void ipg_nic_set_multicast_list(struct net_device *dev)
571{ 571{
572 void __iomem *ioaddr = ipg_ioaddr(dev); 572 void __iomem *ioaddr = ipg_ioaddr(dev);
573 struct dev_mc_list *mc_list_ptr; 573 struct netdev_hw_addr *ha;
574 unsigned int hashindex; 574 unsigned int hashindex;
575 u32 hashtable[2]; 575 u32 hashtable[2];
576 u8 receivemode; 576 u8 receivemode;
@@ -609,9 +609,9 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
609 hashtable[1] = 0x00000000; 609 hashtable[1] = 0x00000000;
610 610
611 /* Cycle through all multicast addresses to filter. */ 611 /* Cycle through all multicast addresses to filter. */
612 netdev_for_each_mc_addr(mc_list_ptr, dev) { 612 netdev_for_each_mc_addr(ha, dev) {
613 /* Calculate CRC result for each multicast address. */ 613 /* Calculate CRC result for each multicast address. */
614 hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr, 614 hashindex = crc32_le(0xffffffff, ha->addr,
615 ETH_ALEN); 615 ETH_ALEN);
616 616
617 /* Use only the least significant 6 bits. */ 617 /* Use only the least significant 6 bits. */
@@ -1548,8 +1548,6 @@ static void ipg_reset_after_host_error(struct work_struct *work)
1548 container_of(work, struct ipg_nic_private, task.work); 1548 container_of(work, struct ipg_nic_private, task.work);
1549 struct net_device *dev = sp->dev; 1549 struct net_device *dev = sp->dev;
1550 1550
1551 IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL));
1552
1553 /* 1551 /*
1554 * Acknowledge HostError interrupt by resetting 1552 * Acknowledge HostError interrupt by resetting
1555 * IPG DMA and HOST. 1553 * IPG DMA and HOST.
@@ -1826,9 +1824,6 @@ static int ipg_nic_stop(struct net_device *dev)
1826 1824
1827 netif_stop_queue(dev); 1825 netif_stop_queue(dev);
1828 1826
1829 IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount);
1830 IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount);
1831 IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount);
1832 IPG_DUMPTFDLIST(dev); 1827 IPG_DUMPTFDLIST(dev);
1833 1828
1834 do { 1829 do {
diff --git a/drivers/net/ipg.h b/drivers/net/ipg.h
index dfc2541bb556..6ce027355fcf 100644
--- a/drivers/net/ipg.h
+++ b/drivers/net/ipg.h
@@ -29,7 +29,7 @@
29/* GMII based PHY IDs */ 29/* GMII based PHY IDs */
30#define NS 0x2000 30#define NS 0x2000
31#define MARVELL 0x0141 31#define MARVELL 0x0141
32#define ICPLUS_PHY 0x243 32#define ICPLUS_PHY 0x243
33 33
34/* NIC Physical Layer Device MII register fields. */ 34/* NIC Physical Layer Device MII register fields. */
35#define MII_PHY_SELECTOR_IEEE8023 0x0001 35#define MII_PHY_SELECTOR_IEEE8023 0x0001
@@ -96,31 +96,31 @@ enum ipg_regs {
96}; 96};
97 97
98/* Ethernet MIB statistic register offsets. */ 98/* Ethernet MIB statistic register offsets. */
99#define IPG_OCTETRCVOK 0xA8 99#define IPG_OCTETRCVOK 0xA8
100#define IPG_MCSTOCTETRCVDOK 0xAC 100#define IPG_MCSTOCTETRCVDOK 0xAC
101#define IPG_BCSTOCTETRCVOK 0xB0 101#define IPG_BCSTOCTETRCVOK 0xB0
102#define IPG_FRAMESRCVDOK 0xB4 102#define IPG_FRAMESRCVDOK 0xB4
103#define IPG_MCSTFRAMESRCVDOK 0xB8 103#define IPG_MCSTFRAMESRCVDOK 0xB8
104#define IPG_BCSTFRAMESRCVDOK 0xBE 104#define IPG_BCSTFRAMESRCVDOK 0xBE
105#define IPG_MACCONTROLFRAMESRCVD 0xC6 105#define IPG_MACCONTROLFRAMESRCVD 0xC6
106#define IPG_FRAMETOOLONGERRRORS 0xC8 106#define IPG_FRAMETOOLONGERRRORS 0xC8
107#define IPG_INRANGELENGTHERRORS 0xCA 107#define IPG_INRANGELENGTHERRORS 0xCA
108#define IPG_FRAMECHECKSEQERRORS 0xCC 108#define IPG_FRAMECHECKSEQERRORS 0xCC
109#define IPG_FRAMESLOSTRXERRORS 0xCE 109#define IPG_FRAMESLOSTRXERRORS 0xCE
110#define IPG_OCTETXMTOK 0xD0 110#define IPG_OCTETXMTOK 0xD0
111#define IPG_MCSTOCTETXMTOK 0xD4 111#define IPG_MCSTOCTETXMTOK 0xD4
112#define IPG_BCSTOCTETXMTOK 0xD8 112#define IPG_BCSTOCTETXMTOK 0xD8
113#define IPG_FRAMESXMTDOK 0xDC 113#define IPG_FRAMESXMTDOK 0xDC
114#define IPG_MCSTFRAMESXMTDOK 0xE0 114#define IPG_MCSTFRAMESXMTDOK 0xE0
115#define IPG_FRAMESWDEFERREDXMT 0xE4 115#define IPG_FRAMESWDEFERREDXMT 0xE4
116#define IPG_LATECOLLISIONS 0xE8 116#define IPG_LATECOLLISIONS 0xE8
117#define IPG_MULTICOLFRAMES 0xEC 117#define IPG_MULTICOLFRAMES 0xEC
118#define IPG_SINGLECOLFRAMES 0xF0 118#define IPG_SINGLECOLFRAMES 0xF0
119#define IPG_BCSTFRAMESXMTDOK 0xF6 119#define IPG_BCSTFRAMESXMTDOK 0xF6
120#define IPG_CARRIERSENSEERRORS 0xF8 120#define IPG_CARRIERSENSEERRORS 0xF8
121#define IPG_MACCONTROLFRAMESXMTDOK 0xFA 121#define IPG_MACCONTROLFRAMESXMTDOK 0xFA
122#define IPG_FRAMESABORTXSCOLLS 0xFC 122#define IPG_FRAMESABORTXSCOLLS 0xFC
123#define IPG_FRAMESWEXDEFERRAL 0xFE 123#define IPG_FRAMESWEXDEFERRAL 0xFE
124 124
125/* RMON statistic register offsets. */ 125/* RMON statistic register offsets. */
126#define IPG_ETHERSTATSCOLLISIONS 0x100 126#define IPG_ETHERSTATSCOLLISIONS 0x100
@@ -134,8 +134,8 @@ enum ipg_regs {
134#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120 134#define IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT 0x120
135#define IPG_ETHERSTATSCRCALIGNERRORS 0x124 135#define IPG_ETHERSTATSCRCALIGNERRORS 0x124
136#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128 136#define IPG_ETHERSTATSUNDERSIZEPKTS 0x128
137#define IPG_ETHERSTATSFRAGMENTS 0x12C 137#define IPG_ETHERSTATSFRAGMENTS 0x12C
138#define IPG_ETHERSTATSJABBERS 0x130 138#define IPG_ETHERSTATSJABBERS 0x130
139#define IPG_ETHERSTATSOCTETS 0x134 139#define IPG_ETHERSTATSOCTETS 0x134
140#define IPG_ETHERSTATSPKTS 0x138 140#define IPG_ETHERSTATSPKTS 0x138
141#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C 141#define IPG_ETHERSTATSPKTS64OCTESTS 0x13C
@@ -154,10 +154,10 @@ enum ipg_regs {
154#define IPG_ETHERSTATSDROPEVENTS 0xCE 154#define IPG_ETHERSTATSDROPEVENTS 0xCE
155 155
156/* Serial EEPROM offsets */ 156/* Serial EEPROM offsets */
157#define IPG_EEPROM_CONFIGPARAM 0x00 157#define IPG_EEPROM_CONFIGPARAM 0x00
158#define IPG_EEPROM_ASICCTRL 0x01 158#define IPG_EEPROM_ASICCTRL 0x01
159#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02 159#define IPG_EEPROM_SUBSYSTEMVENDORID 0x02
160#define IPG_EEPROM_SUBSYSTEMID 0x03 160#define IPG_EEPROM_SUBSYSTEMID 0x03
161#define IPG_EEPROM_STATIONADDRESS0 0x10 161#define IPG_EEPROM_STATIONADDRESS0 0x10
162#define IPG_EEPROM_STATIONADDRESS1 0x11 162#define IPG_EEPROM_STATIONADDRESS1 0x11
163#define IPG_EEPROM_STATIONADDRESS2 0x12 163#define IPG_EEPROM_STATIONADDRESS2 0x12
@@ -168,16 +168,16 @@ enum ipg_regs {
168 168
169/* IOBaseAddress */ 169/* IOBaseAddress */
170#define IPG_PIB_RSVD_MASK 0xFFFFFE01 170#define IPG_PIB_RSVD_MASK 0xFFFFFE01
171#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00 171#define IPG_PIB_IOBASEADDRESS 0xFFFFFF00
172#define IPG_PIB_IOBASEADDRIND 0x00000001 172#define IPG_PIB_IOBASEADDRIND 0x00000001
173 173
174/* MemBaseAddress */ 174/* MemBaseAddress */
175#define IPG_PMB_RSVD_MASK 0xFFFFFE07 175#define IPG_PMB_RSVD_MASK 0xFFFFFE07
176#define IPG_PMB_MEMBASEADDRIND 0x00000001 176#define IPG_PMB_MEMBASEADDRIND 0x00000001
177#define IPG_PMB_MEMMAPTYPE 0x00000006 177#define IPG_PMB_MEMMAPTYPE 0x00000006
178#define IPG_PMB_MEMMAPTYPE0 0x00000002 178#define IPG_PMB_MEMMAPTYPE0 0x00000002
179#define IPG_PMB_MEMMAPTYPE1 0x00000004 179#define IPG_PMB_MEMMAPTYPE1 0x00000004
180#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00 180#define IPG_PMB_MEMBASEADDRESS 0xFFFFFE00
181 181
182/* ConfigStatus */ 182/* ConfigStatus */
183#define IPG_CS_RSVD_MASK 0xFFB0 183#define IPG_CS_RSVD_MASK 0xFFB0
@@ -196,20 +196,20 @@ enum ipg_regs {
196 196
197/* TFDList, TFC */ 197/* TFDList, TFC */
198#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF 198#define IPG_TFC_RSVD_MASK 0x0000FFFF9FFFFFFF
199#define IPG_TFC_FRAMEID 0x000000000000FFFF 199#define IPG_TFC_FRAMEID 0x000000000000FFFF
200#define IPG_TFC_WORDALIGN 0x0000000000030000 200#define IPG_TFC_WORDALIGN 0x0000000000030000
201#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000 201#define IPG_TFC_WORDALIGNTODWORD 0x0000000000000000
202#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000 202#define IPG_TFC_WORDALIGNTOWORD 0x0000000000020000
203#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000 203#define IPG_TFC_WORDALIGNDISABLED 0x0000000000030000
204#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000 204#define IPG_TFC_TCPCHECKSUMENABLE 0x0000000000040000
205#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000 205#define IPG_TFC_UDPCHECKSUMENABLE 0x0000000000080000
206#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000 206#define IPG_TFC_IPCHECKSUMENABLE 0x0000000000100000
207#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000 207#define IPG_TFC_FCSAPPENDDISABLE 0x0000000000200000
208#define IPG_TFC_TXINDICATE 0x0000000000400000 208#define IPG_TFC_TXINDICATE 0x0000000000400000
209#define IPG_TFC_TXDMAINDICATE 0x0000000000800000 209#define IPG_TFC_TXDMAINDICATE 0x0000000000800000
210#define IPG_TFC_FRAGCOUNT 0x000000000F000000 210#define IPG_TFC_FRAGCOUNT 0x000000000F000000
211#define IPG_TFC_VLANTAGINSERT 0x0000000010000000 211#define IPG_TFC_VLANTAGINSERT 0x0000000010000000
212#define IPG_TFC_TFDDONE 0x0000000080000000 212#define IPG_TFC_TFDDONE 0x0000000080000000
213#define IPG_TFC_VID 0x00000FFF00000000 213#define IPG_TFC_VID 0x00000FFF00000000
214#define IPG_TFC_CFI 0x0000100000000000 214#define IPG_TFC_CFI 0x0000100000000000
215#define IPG_TFC_USERPRIORITY 0x0000E00000000000 215#define IPG_TFC_USERPRIORITY 0x0000E00000000000
@@ -217,35 +217,35 @@ enum ipg_regs {
217/* TFDList, FragInfo */ 217/* TFDList, FragInfo */
218#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF 218#define IPG_TFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
219#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF 219#define IPG_TFI_FRAGADDR 0x000000FFFFFFFFFF
220#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL 220#define IPG_TFI_FRAGLEN 0xFFFF000000000000LL
221 221
222/* RFD data structure masks. */ 222/* RFD data structure masks. */
223 223
224/* RFDList, RFS */ 224/* RFDList, RFS */
225#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF 225#define IPG_RFS_RSVD_MASK 0x0000FFFFFFFFFFFF
226#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF 226#define IPG_RFS_RXFRAMELEN 0x000000000000FFFF
227#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000 227#define IPG_RFS_RXFIFOOVERRUN 0x0000000000010000
228#define IPG_RFS_RXRUNTFRAME 0x0000000000020000 228#define IPG_RFS_RXRUNTFRAME 0x0000000000020000
229#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000 229#define IPG_RFS_RXALIGNMENTERROR 0x0000000000040000
230#define IPG_RFS_RXFCSERROR 0x0000000000080000 230#define IPG_RFS_RXFCSERROR 0x0000000000080000
231#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000 231#define IPG_RFS_RXOVERSIZEDFRAME 0x0000000000100000
232#define IPG_RFS_RXLENGTHERROR 0x0000000000200000 232#define IPG_RFS_RXLENGTHERROR 0x0000000000200000
233#define IPG_RFS_VLANDETECTED 0x0000000000400000 233#define IPG_RFS_VLANDETECTED 0x0000000000400000
234#define IPG_RFS_TCPDETECTED 0x0000000000800000 234#define IPG_RFS_TCPDETECTED 0x0000000000800000
235#define IPG_RFS_TCPERROR 0x0000000001000000 235#define IPG_RFS_TCPERROR 0x0000000001000000
236#define IPG_RFS_UDPDETECTED 0x0000000002000000 236#define IPG_RFS_UDPDETECTED 0x0000000002000000
237#define IPG_RFS_UDPERROR 0x0000000004000000 237#define IPG_RFS_UDPERROR 0x0000000004000000
238#define IPG_RFS_IPDETECTED 0x0000000008000000 238#define IPG_RFS_IPDETECTED 0x0000000008000000
239#define IPG_RFS_IPERROR 0x0000000010000000 239#define IPG_RFS_IPERROR 0x0000000010000000
240#define IPG_RFS_FRAMESTART 0x0000000020000000 240#define IPG_RFS_FRAMESTART 0x0000000020000000
241#define IPG_RFS_FRAMEEND 0x0000000040000000 241#define IPG_RFS_FRAMEEND 0x0000000040000000
242#define IPG_RFS_RFDDONE 0x0000000080000000 242#define IPG_RFS_RFDDONE 0x0000000080000000
243#define IPG_RFS_TCI 0x0000FFFF00000000 243#define IPG_RFS_TCI 0x0000FFFF00000000
244 244
245/* RFDList, FragInfo */ 245/* RFDList, FragInfo */
246#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF 246#define IPG_RFI_RSVD_MASK 0xFFFF00FFFFFFFFFF
247#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF 247#define IPG_RFI_FRAGADDR 0x000000FFFFFFFFFF
248#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL 248#define IPG_RFI_FRAGLEN 0xFFFF000000000000LL
249 249
250/* I/O Register masks. */ 250/* I/O Register masks. */
251 251
@@ -254,37 +254,37 @@ enum ipg_regs {
254 254
255/* Statistics Mask */ 255/* Statistics Mask */
256#define IPG_SM_ALL 0x0FFFFFFF 256#define IPG_SM_ALL 0x0FFFFFFF
257#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001 257#define IPG_SM_OCTETRCVOK_FRAMESRCVDOK 0x00000001
258#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002 258#define IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK 0x00000002
259#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004 259#define IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK 0x00000004
260#define IPG_SM_RXJUMBOFRAMES 0x00000008 260#define IPG_SM_RXJUMBOFRAMES 0x00000008
261#define IPG_SM_TCPCHECKSUMERRORS 0x00000010 261#define IPG_SM_TCPCHECKSUMERRORS 0x00000010
262#define IPG_SM_IPCHECKSUMERRORS 0x00000020 262#define IPG_SM_IPCHECKSUMERRORS 0x00000020
263#define IPG_SM_UDPCHECKSUMERRORS 0x00000040 263#define IPG_SM_UDPCHECKSUMERRORS 0x00000040
264#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080 264#define IPG_SM_MACCONTROLFRAMESRCVD 0x00000080
265#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100 265#define IPG_SM_FRAMESTOOLONGERRORS 0x00000100
266#define IPG_SM_INRANGELENGTHERRORS 0x00000200 266#define IPG_SM_INRANGELENGTHERRORS 0x00000200
267#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400 267#define IPG_SM_FRAMECHECKSEQERRORS 0x00000400
268#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800 268#define IPG_SM_FRAMESLOSTRXERRORS 0x00000800
269#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000 269#define IPG_SM_OCTETXMTOK_FRAMESXMTOK 0x00001000
270#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000 270#define IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK 0x00002000
271#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000 271#define IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK 0x00004000
272#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000 272#define IPG_SM_FRAMESWDEFERREDXMT 0x00008000
273#define IPG_SM_LATECOLLISIONS 0x00010000 273#define IPG_SM_LATECOLLISIONS 0x00010000
274#define IPG_SM_MULTICOLFRAMES 0x00020000 274#define IPG_SM_MULTICOLFRAMES 0x00020000
275#define IPG_SM_SINGLECOLFRAMES 0x00040000 275#define IPG_SM_SINGLECOLFRAMES 0x00040000
276#define IPG_SM_TXJUMBOFRAMES 0x00080000 276#define IPG_SM_TXJUMBOFRAMES 0x00080000
277#define IPG_SM_CARRIERSENSEERRORS 0x00100000 277#define IPG_SM_CARRIERSENSEERRORS 0x00100000
278#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000 278#define IPG_SM_MACCONTROLFRAMESXMTD 0x00200000
279#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000 279#define IPG_SM_FRAMESABORTXSCOLLS 0x00400000
280#define IPG_SM_FRAMESWEXDEFERAL 0x00800000 280#define IPG_SM_FRAMESWEXDEFERAL 0x00800000
281 281
282/* Countdown */ 282/* Countdown */
283#define IPG_CD_RSVD_MASK 0x0700FFFF 283#define IPG_CD_RSVD_MASK 0x0700FFFF
284#define IPG_CD_COUNT 0x0000FFFF 284#define IPG_CD_COUNT 0x0000FFFF
285#define IPG_CD_COUNTDOWNSPEED 0x01000000 285#define IPG_CD_COUNTDOWNSPEED 0x01000000
286#define IPG_CD_COUNTDOWNMODE 0x02000000 286#define IPG_CD_COUNTDOWNMODE 0x02000000
287#define IPG_CD_COUNTINTENABLED 0x04000000 287#define IPG_CD_COUNTINTENABLED 0x04000000
288 288
289/* TxDMABurstThresh */ 289/* TxDMABurstThresh */
290#define IPG_TB_RSVD_MASK 0xFF 290#define IPG_TB_RSVD_MASK 0xFF
@@ -653,15 +653,28 @@ enum ipg_regs {
653 * Miscellaneous macros. 653 * Miscellaneous macros.
654 */ 654 */
655 655
656/* Marco for printing debug statements. */ 656/* Macros for printing debug statements. */
657#ifdef IPG_DEBUG 657#ifdef IPG_DEBUG
658# define IPG_DEBUG_MSG(args...) 658# define IPG_DEBUG_MSG(fmt, args...) \
659# define IPG_DDEBUG_MSG(args...) printk(KERN_DEBUG "IPG: " args) 659do { \
660 if (0) \
661 printk(KERN_DEBUG "IPG: " fmt, ##args); \
662} while (0)
663# define IPG_DDEBUG_MSG(fmt, args...) \
664 printk(KERN_DEBUG "IPG: " fmt, ##args)
660# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args) 665# define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
661# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args) 666# define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
662#else 667#else
663# define IPG_DEBUG_MSG(args...) 668# define IPG_DEBUG_MSG(fmt, args...) \
664# define IPG_DDEBUG_MSG(args...) 669do { \
670 if (0) \
671 printk(KERN_DEBUG "IPG: " fmt, ##args); \
672} while (0)
673# define IPG_DDEBUG_MSG(fmt, args...) \
674do { \
675 if (0) \
676 printk(KERN_DEBUG "IPG: " fmt, ##args); \
677} while (0)
665# define IPG_DUMPRFDLIST(args) 678# define IPG_DUMPRFDLIST(args)
666# define IPG_DUMPTFDLIST(args) 679# define IPG_DUMPTFDLIST(args)
667#endif 680#endif
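The replacement macros rely on the do { if (0) printk(...); } while (0) idiom: the compiler still type-checks the format string against its arguments, but the branch is dead code and is optimized away, so disabled debug statements cost nothing at runtime. A standalone illustration (names are generic, not from ipg.h):

	/* A no-op debug macro whose arguments are still type-checked. */
	#define DBG_MSG(fmt, args...)				\
	do {							\
		if (0)						\
			printk(KERN_DEBUG "drv: " fmt, ##args);	\
	} while (0)

	u32 status = 0x12345678;		/* example value */
	DBG_MSG("status = %08x\n", status);	/* compiles to nothing */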
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index af10e97345ce..25bb2a015e18 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -397,5 +397,11 @@ config MCS_FIR
397 To compile it as a module, choose M here: the module will be called 397 To compile it as a module, choose M here: the module will be called
398 mcs7780. 398 mcs7780.
399 399
400config SH_IRDA
401 tristate "SuperH IrDA driver"
402 depends on IRDA && ARCH_SHMOBILE
403 help
404 Say Y here if you want to enable SuperH IrDA devices.
405
400endmenu 406endmenu
401 407
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index e030d47e2793..dfc64537f62f 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_VIA_FIR) += via-ircc.o
19obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o 19obj-$(CONFIG_PXA_FICP) += pxaficp_ir.o
20obj-$(CONFIG_MCS_FIR) += mcs7780.o 20obj-$(CONFIG_MCS_FIR) += mcs7780.o
21obj-$(CONFIG_AU1000_FIR) += au1k_ir.o 21obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
22obj-$(CONFIG_SH_IRDA) += sh_irda.o
22# SIR drivers 23# SIR drivers
23obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o 24obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
24obj-$(CONFIG_BFIN_SIR) += bfin_sir.o 25obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c
index 28992c815cba..a3cb109006a5 100644
--- a/drivers/net/irda/ali-ircc.c
+++ b/drivers/net/irda/ali-ircc.c
@@ -753,18 +753,18 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
753 if(OldMessageCount > ((self->LineStatus+1) & 0x07)) 753 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
754 { 754 {
755 self->rcvFramesOverflow = TRUE; 755 self->rcvFramesOverflow = TRUE;
756 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******** \n", __func__); 756 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ********\n", __func__);
757 } 757 }
758 758
759 if (ali_ircc_dma_receive_complete(self)) 759 if (ali_ircc_dma_receive_complete(self))
760 { 760 {
761 IRDA_DEBUG(1, "%s(), ******* receive complete ******** \n", __func__); 761 IRDA_DEBUG(1, "%s(), ******* receive complete ********\n", __func__);
762 762
763 self->ier = IER_EOM; 763 self->ier = IER_EOM;
764 } 764 }
765 else 765 else
766 { 766 {
767 IRDA_DEBUG(1, "%s(), ******* Not receive complete ******** \n", __func__); 767 IRDA_DEBUG(1, "%s(), ******* Not receive complete ********\n", __func__);
768 768
769 self->ier = IER_EOM | IER_TIMER; 769 self->ier = IER_EOM | IER_TIMER;
770 } 770 }
@@ -777,7 +777,7 @@ static irqreturn_t ali_ircc_fir_interrupt(struct ali_ircc_cb *self)
777 if(OldMessageCount > ((self->LineStatus+1) & 0x07)) 777 if(OldMessageCount > ((self->LineStatus+1) & 0x07))
778 { 778 {
779 self->rcvFramesOverflow = TRUE; 779 self->rcvFramesOverflow = TRUE;
780 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE ******* \n", __func__); 780 IRDA_DEBUG(1, "%s(), ******* self->rcvFramesOverflow = TRUE *******\n", __func__);
781 } 781 }
782 /* Disable Timer */ 782 /* Disable Timer */
783 switch_bank(iobase, BANK1); 783 switch_bank(iobase, BANK1);
@@ -942,7 +942,7 @@ static void ali_ircc_sir_write_wakeup(struct ali_ircc_cb *self)
942 // benjamin 2000/11/10 06:32PM 942 // benjamin 2000/11/10 06:32PM
943 if (self->io.speed > 115200) 943 if (self->io.speed > 115200)
944 { 944 {
945 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT \n", __func__ ); 945 IRDA_DEBUG(2, "%s(), ali_ircc_change_speed from UART_LSR_TEMT\n", __func__ );
946 946
947 self->ier = IER_EOM; 947 self->ier = IER_EOM;
948 // SetCOMInterrupts(self, TRUE); 948 // SetCOMInterrupts(self, TRUE);
@@ -970,7 +970,7 @@ static void ali_ircc_change_speed(struct ali_ircc_cb *self, __u32 baud)
970 970
971 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ ); 971 IRDA_DEBUG(1, "%s(), ---------------- Start ----------------\n", __func__ );
972 972
973 IRDA_DEBUG(2, "%s(), setting speed = %d \n", __func__ , baud); 973 IRDA_DEBUG(2, "%s(), setting speed = %d\n", __func__ , baud);
974 974
975 /* This function *must* be called with irq off and spin-lock. 975 /* This function *must* be called with irq off and spin-lock.
976 * - Jean II */ 976 * - Jean II */
@@ -1500,7 +1500,7 @@ static netdev_tx_t ali_ircc_fir_hard_xmit(struct sk_buff *skb,
1500 diff = self->now.tv_usec - self->stamp.tv_usec; 1500 diff = self->now.tv_usec - self->stamp.tv_usec;
1501 /* self->stamp is set from ali_ircc_dma_receive_complete() */ 1501 /* self->stamp is set from ali_ircc_dma_receive_complete() */
1502 1502
1503 IRDA_DEBUG(1, "%s(), ******* diff = %d ******* \n", __func__ , diff); 1503 IRDA_DEBUG(1, "%s(), ******* diff = %d *******\n", __func__ , diff);
1504 1504
1505 if (diff < 0) 1505 if (diff < 0)
1506 diff += 1000000; 1506 diff += 1000000;
@@ -1641,7 +1641,7 @@ static void ali_ircc_dma_xmit(struct ali_ircc_cb *self)
1641 tmp = inb(iobase+FIR_LCR_B); 1641 tmp = inb(iobase+FIR_LCR_B);
1642 tmp &= ~0x20; // Disable SIP 1642 tmp &= ~0x20; // Disable SIP
1643 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B); 1643 outb(((unsigned char)(tmp & 0x3f) | LCR_B_TX_MODE) & ~LCR_B_BW, iobase+FIR_LCR_B);
1644 IRDA_DEBUG(1, "%s(), ******* Change to TX mode: FIR_LCR_B = 0x%x ******* \n", __func__ , inb(iobase+FIR_LCR_B)); 1644 IRDA_DEBUG(1, "%s(), *** Change to TX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
1645 1645
1646 outb(0, iobase+FIR_LSR); 1646 outb(0, iobase+FIR_LSR);
1647 1647
@@ -1768,7 +1768,7 @@ static int ali_ircc_dma_receive(struct ali_ircc_cb *self)
1768 //switch_bank(iobase, BANK0); 1768 //switch_bank(iobase, BANK0);
1769 tmp = inb(iobase+FIR_LCR_B); 1769 tmp = inb(iobase+FIR_LCR_B);
1770 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM 1770 outb((unsigned char)(tmp &0x3f) | LCR_B_RX_MODE | LCR_B_BW , iobase + FIR_LCR_B); // 2000/12/1 05:16PM
1771 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x *** \n", __func__ , inb(iobase+FIR_LCR_B)); 1771 IRDA_DEBUG(1, "%s(), *** Change To RX mode: FIR_LCR_B = 0x%x ***\n", __func__ , inb(iobase+FIR_LCR_B));
1772 1772
1773 /* Set Rx Threshold */ 1773 /* Set Rx Threshold */
1774 switch_bank(iobase, BANK1); 1774 switch_bank(iobase, BANK1);
@@ -1840,7 +1840,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1840 /* Check for errors */ 1840 /* Check for errors */
1841 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0)) 1841 if ((status & 0xd8) || self->rcvFramesOverflow || (len==0))
1842 { 1842 {
1843 IRDA_DEBUG(0,"%s(), ************* RX Errors ************ \n", __func__ ); 1843 IRDA_DEBUG(0,"%s(), ************* RX Errors ************\n", __func__ );
1844 1844
1845 /* Skip frame */ 1845 /* Skip frame */
1846 self->netdev->stats.rx_errors++; 1846 self->netdev->stats.rx_errors++;
@@ -1850,29 +1850,29 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1850 if (status & LSR_FIFO_UR) 1850 if (status & LSR_FIFO_UR)
1851 { 1851 {
1852 self->netdev->stats.rx_frame_errors++; 1852 self->netdev->stats.rx_frame_errors++;
1853 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************ \n", __func__ ); 1853 IRDA_DEBUG(0,"%s(), ************* FIFO Errors ************\n", __func__ );
1854 } 1854 }
1855 if (status & LSR_FRAME_ERROR) 1855 if (status & LSR_FRAME_ERROR)
1856 { 1856 {
1857 self->netdev->stats.rx_frame_errors++; 1857 self->netdev->stats.rx_frame_errors++;
1858 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************ \n", __func__ ); 1858 IRDA_DEBUG(0,"%s(), ************* FRAME Errors ************\n", __func__ );
1859 } 1859 }
1860 1860
1861 if (status & LSR_CRC_ERROR) 1861 if (status & LSR_CRC_ERROR)
1862 { 1862 {
1863 self->netdev->stats.rx_crc_errors++; 1863 self->netdev->stats.rx_crc_errors++;
1864 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************ \n", __func__ ); 1864 IRDA_DEBUG(0,"%s(), ************* CRC Errors ************\n", __func__ );
1865 } 1865 }
1866 1866
1867 if(self->rcvFramesOverflow) 1867 if(self->rcvFramesOverflow)
1868 { 1868 {
1869 self->netdev->stats.rx_frame_errors++; 1869 self->netdev->stats.rx_frame_errors++;
1870 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************ \n", __func__ ); 1870 IRDA_DEBUG(0,"%s(), ************* Overran DMA buffer ************\n", __func__ );
1871 } 1871 }
1872 if(len == 0) 1872 if(len == 0)
1873 { 1873 {
1874 self->netdev->stats.rx_frame_errors++; 1874 self->netdev->stats.rx_frame_errors++;
1875 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 ********* \n", __func__ ); 1875 IRDA_DEBUG(0,"%s(), ********** Receive Frame Size = 0 *********\n", __func__ );
1876 } 1876 }
1877 } 1877 }
1878 else 1878 else
@@ -1884,7 +1884,7 @@ static int ali_ircc_dma_receive_complete(struct ali_ircc_cb *self)
1884 val = inb(iobase+FIR_BSR); 1884 val = inb(iobase+FIR_BSR);
1885 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80) 1885 if ((val& BSR_FIFO_NOT_EMPTY)== 0x80)
1886 { 1886 {
1887 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************ \n", __func__ ); 1887 IRDA_DEBUG(0, "%s(), ************* BSR_FIFO_NOT_EMPTY ************\n", __func__ );
1888 1888
1889 /* Put this entry back in fifo */ 1889 /* Put this entry back in fifo */
1890 st_fifo->head--; 1890 st_fifo->head--;
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index 2c9b3af16612..4441fa3389c2 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -839,7 +839,7 @@ static void irda_usb_receive(struct urb *urb)
839 /* Usually precursor to a hot-unplug on OHCI. */ 839 /* Usually precursor to a hot-unplug on OHCI. */
840 default: 840 default:
841 self->netdev->stats.rx_errors++; 841 self->netdev->stats.rx_errors++;
842 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X \n", __func__, urb->status, urb->transfer_flags); 842 IRDA_DEBUG(0, "%s(), RX status %d, transfer_flags 0x%04X\n", __func__, urb->status, urb->transfer_flags);
843 break; 843 break;
844 } 844 }
845 /* If we received an error, we don't want to resubmit the 845 /* If we received an error, we don't want to resubmit the
diff --git a/drivers/net/irda/sh_irda.c b/drivers/net/irda/sh_irda.c
new file mode 100644
index 000000000000..9a828b06a57e
--- /dev/null
+++ b/drivers/net/irda/sh_irda.c
@@ -0,0 +1,865 @@
1/*
2 * SuperH IrDA Driver
3 *
4 * Copyright (C) 2010 Renesas Solutions Corp.
5 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
6 *
7 * Based on sh_sir.c
8 * Copyright (C) 2009 Renesas Solutions Corp.
9 * Copyright 2006-2009 Analog Devices Inc.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 */
15
16/*
17 * CAUTION
18 *
19 * This driver is very simple.
20 * So, it doesn't have below support now
21 * - MIR/FIR support
22 * - DMA transfer support
23 * - FIFO mode support
24 */
25#include <linux/module.h>
26#include <linux/platform_device.h>
27#include <linux/clk.h>
28#include <net/irda/wrapper.h>
29#include <net/irda/irda_device.h>
30
31#define DRIVER_NAME "sh_irda"
32
33#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
34#define __IRDARAM_LEN 0x13FF
35#else
36#define __IRDARAM_LEN 0x1039
37#endif
38
39#define IRTMR 0x1F00 /* Transfer mode */
40#define IRCFR 0x1F02 /* Configuration */
41#define IRCTR 0x1F04 /* IR control */
42#define IRTFLR 0x1F20 /* Transmit frame length */
43#define IRTCTR 0x1F22 /* Transmit control */
44#define IRRFLR 0x1F40 /* Receive frame length */
45#define IRRCTR 0x1F42 /* Receive control */
46#define SIRISR 0x1F60 /* SIR-UART mode interrupt source */
47#define SIRIMR 0x1F62 /* SIR-UART mode interrupt mask */
48#define SIRICR 0x1F64 /* SIR-UART mode interrupt clear */
49#define SIRBCR 0x1F68 /* SIR-UART mode baud rate count */
50#define MFIRISR 0x1F70 /* MIR/FIR mode interrupt source */
51#define MFIRIMR 0x1F72 /* MIR/FIR mode interrupt mask */
52#define MFIRICR 0x1F74 /* MIR/FIR mode interrupt clear */
53#define CRCCTR 0x1F80 /* CRC engine control */
54#define CRCIR 0x1F86 /* CRC engine input data */
55#define CRCCR 0x1F8A /* CRC engine calculation */
56#define CRCOR 0x1F8E /* CRC engine output data */
57#define FIFOCP 0x1FC0 /* FIFO current pointer */
58#define FIFOFP 0x1FC2 /* FIFO follow pointer */
59#define FIFORSMSK 0x1FC4 /* FIFO receive status mask */
60#define FIFORSOR 0x1FC6 /* FIFO receive status OR */
61#define FIFOSEL 0x1FC8 /* FIFO select */
62#define FIFORS 0x1FCA /* FIFO receive status */
63#define FIFORFL 0x1FCC /* FIFO receive frame length */
64#define FIFORAMCP 0x1FCE /* FIFO RAM current pointer */
65#define FIFORAMFP 0x1FD0 /* FIFO RAM follow pointer */
66#define BIFCTL 0x1FD2 /* BUS interface control */
67#define IRDARAM 0x0000 /* IrDA buffer RAM */
68#define IRDARAM_LEN __IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
69
70/* IRTMR */
71#define TMD_MASK (0x3 << 14) /* Transfer Mode */
72#define TMD_SIR (0x0 << 14)
73#define TMD_MIR (0x3 << 14)
74#define TMD_FIR (0x2 << 14)
75
76#define FIFORIM (1 << 8) /* FIFO receive interrupt mask */
77#define MIM (1 << 4) /* MIR/FIR Interrupt Mask */
78#define SIM (1 << 0) /* SIR Interrupt Mask */
79#define xIM_MASK (FIFORIM | MIM | SIM)
80
81/* IRCFR */
82#define RTO_SHIFT 8 /* shift for Receive Timeout */
83#define RTO (0x3 << RTO_SHIFT)
84
85/* IRTCTR */
86#define ARMOD (1 << 15) /* Auto-Receive Mode */
87#define TE (1 << 0) /* Transmit Enable */
88
89/* IRRFLR */
90#define RFL_MASK (0x1FFF) /* mask for Receive Frame Length */
91
92/* IRRCTR */
93#define RE (1 << 0) /* Receive Enable */
94
95/*
96 * SIRISR, SIRIMR, SIRICR,
97 * MFIRISR, MFIRIMR, MFIRICR
98 */
99#define FRE (1 << 15) /* Frame Receive End */
100#define TROV (1 << 11) /* Transfer Area Overflow */
101#define xIR_9 (1 << 9)
102#define TOT xIR_9 /* for SIR Timeout */
103#define ABTD xIR_9 /* for MIR/FIR Abort Detection */
104#define xIR_8 (1 << 8)
105#define FER xIR_8 /* for SIR Framing Error */
106#define CRCER xIR_8 /* for MIR/FIR CRC error */
107#define FTE (1 << 7) /* Frame Transmit End */
108#define xIR_MASK (FRE | TROV | xIR_9 | xIR_8 | FTE)
109
110/* SIRBCR */
111#define BRC_MASK (0x3F) /* mask for Baud Rate Count */
112
113/* CRCCTR */
114#define CRC_RST (1 << 15) /* CRC Engine Reset */
115#define CRC_CT_MASK 0x0FFF /* mask for CRC Engine Input Data Count */
116
117/* CRCIR */
118#define CRC_IN_MASK 0x0FFF /* mask for CRC Engine Input Data */
119
120/************************************************************************
121
122
123 enum / structure
124
125
126************************************************************************/
127enum sh_irda_mode {
128 SH_IRDA_NONE = 0,
129 SH_IRDA_SIR,
130 SH_IRDA_MIR,
131 SH_IRDA_FIR,
132};
133
134struct sh_irda_self;
135struct sh_irda_xir_func {
136 int (*xir_fre) (struct sh_irda_self *self);
137 int (*xir_trov) (struct sh_irda_self *self);
138 int (*xir_9) (struct sh_irda_self *self);
139 int (*xir_8) (struct sh_irda_self *self);
140 int (*xir_fte) (struct sh_irda_self *self);
141};
142
143struct sh_irda_self {
144 void __iomem *membase;
145 unsigned int irq;
146 struct clk *clk;
147
148 struct net_device *ndev;
149
150 struct irlap_cb *irlap;
151 struct qos_info qos;
152
153 iobuff_t tx_buff;
154 iobuff_t rx_buff;
155
156 enum sh_irda_mode mode;
157 spinlock_t lock;
158
159 struct sh_irda_xir_func *xir_func;
160};
161
162/************************************************************************
163
164
165 common function
166
167
168************************************************************************/
169static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
170{
171 unsigned long flags;
172
173 spin_lock_irqsave(&self->lock, flags);
174 iowrite16(data, self->membase + offset);
175 spin_unlock_irqrestore(&self->lock, flags);
176}
177
178static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
179{
180 unsigned long flags;
181 u16 ret;
182
183 spin_lock_irqsave(&self->lock, flags);
184 ret = ioread16(self->membase + offset);
185 spin_unlock_irqrestore(&self->lock, flags);
186
187 return ret;
188}
189
190static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
191 u16 mask, u16 data)
192{
193 unsigned long flags;
194 u16 old, new;
195
196 spin_lock_irqsave(&self->lock, flags);
197 old = ioread16(self->membase + offset);
198 new = (old & ~mask) | data;
199 if (old != new)
200 iowrite16(new, self->membase + offset); /* write the merged value */
201 spin_unlock_irqrestore(&self->lock, flags);
202}
203
204/************************************************************************
205
206
207 mode function
208
209
210************************************************************************/
211/*=====================================
212 *
213 * common
214 *
215 *=====================================*/
216static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
217{
218 struct device *dev = &self->ndev->dev;
219
220 sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
221 dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
222}
223
224static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
225{
226 struct device *dev = &self->ndev->dev;
227
228 if (SH_IRDA_SIR != self->mode)
229 interval = 0;
230
231 if (interval < 0 || interval > 2) {
232 dev_err(dev, "unsupported timeout interval\n");
233 return -EINVAL;
234 }
235
236 sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
237 return 0;
238}
239
240static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
241{
242 struct device *dev = &self->ndev->dev;
243 u16 val;
244
245 if (baudrate < 0)
246 return 0;
247
248 if (SH_IRDA_SIR != self->mode) {
249 dev_err(dev, "not in SIR mode\n");
250 return -EINVAL;
251 }
252
253 /*
254 * Baud rate (bits/s) =
255 * (48 MHz / 26) / (baud rate counter value + 1) x 16
256 */
257 val = (48000000 / 26 / 16 / baudrate) - 1;
258 dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
259
260 sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);
261
262 return 0;
263}
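Plugging the default SIR rate into the divisor formula above (integer arithmetic throughout, per the 48 MHz / 26 source clock in the comment):

	/* Worked example for 9600 baud:
	 *   val = 48000000 / 26 / 16 / 9600 - 1 = 12 - 1 = 11
	 * actual rate = (48000000 / 26) / ((11 + 1) * 16)
	 *            ~= 1846154 / 192 ~= 9615 baud,
	 * about 0.16% above nominal. */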
264
265static int xir_get_rcv_length(struct sh_irda_self *self)
266{
267 return RFL_MASK & sh_irda_read(self, IRRFLR);
268}
269
270/*=====================================
271 *
272 * NONE MODE
273 *
274 *=====================================*/
275static int xir_fre(struct sh_irda_self *self)
276{
277 struct device *dev = &self->ndev->dev;
278 dev_err(dev, "none mode: frame recv\n");
279 return 0;
280}
281
282static int xir_trov(struct sh_irda_self *self)
283{
284 struct device *dev = &self->ndev->dev;
285 dev_err(dev, "none mode: buffer ram over\n");
286 return 0;
287}
288
289static int xir_9(struct sh_irda_self *self)
290{
291 struct device *dev = &self->ndev->dev;
292 dev_err(dev, "none mode: time over\n");
293 return 0;
294}
295
296static int xir_8(struct sh_irda_self *self)
297{
298 struct device *dev = &self->ndev->dev;
299 dev_err(dev, "none mode: framing error\n");
300 return 0;
301}
302
303static int xir_fte(struct sh_irda_self *self)
304{
305 struct device *dev = &self->ndev->dev;
306 dev_err(dev, "none mode: frame transmit end\n");
307 return 0;
308}
309
310static struct sh_irda_xir_func xir_func = {
311 .xir_fre = xir_fre,
312 .xir_trov = xir_trov,
313 .xir_9 = xir_9,
314 .xir_8 = xir_8,
315 .xir_fte = xir_fte,
316};
317
318/*=====================================
319 *
320 * MIR/FIR MODE
321 *
322 * MIR/FIR are not supported now
323 *=====================================*/
324static struct sh_irda_xir_func mfir_func = {
325 .xir_fre = xir_fre,
326 .xir_trov = xir_trov,
327 .xir_9 = xir_9,
328 .xir_8 = xir_8,
329 .xir_fte = xir_fte,
330};
331
332/*=====================================
333 *
334 * SIR MODE
335 *
336 *=====================================*/
337static int sir_fre(struct sh_irda_self *self)
338{
339 struct device *dev = &self->ndev->dev;
340 u16 data16;
341 u8 *data = (u8 *)&data16;
342 int len = xir_get_rcv_length(self);
343 int i, j;
344
345 if (len > IRDARAM_LEN)
346 len = IRDARAM_LEN;
347
348 dev_dbg(dev, "frame recv length = %d\n", len);
349
350 for (i = 0; i < len; i++) {
351 j = i % 2;
352 if (!j)
353 data16 = sh_irda_read(self, IRDARAM + i);
354
355 async_unwrap_char(self->ndev, &self->ndev->stats,
356 &self->rx_buff, data[j]);
357 }
358 self->ndev->last_rx = jiffies;
359
360 sh_irda_rcv_ctrl(self, 1);
361
362 return 0;
363}
364
365static int sir_trov(struct sh_irda_self *self)
366{
367 struct device *dev = &self->ndev->dev;
368
369 dev_err(dev, "buffer ram over\n");
370 sh_irda_rcv_ctrl(self, 1);
371 return 0;
372}
373
374static int sir_tot(struct sh_irda_self *self)
375{
376 struct device *dev = &self->ndev->dev;
377
378 dev_err(dev, "time over\n");
379 sh_irda_set_baudrate(self, 9600);
380 sh_irda_rcv_ctrl(self, 1);
381 return 0;
382}
383
384static int sir_fer(struct sh_irda_self *self)
385{
386 struct device *dev = &self->ndev->dev;
387
388 dev_err(dev, "framing error\n");
389 sh_irda_rcv_ctrl(self, 1);
390 return 0;
391}
392
393static int sir_fte(struct sh_irda_self *self)
394{
395 struct device *dev = &self->ndev->dev;
396
397 dev_dbg(dev, "frame transmit end\n");
398 netif_wake_queue(self->ndev);
399
400 return 0;
401}
402
403static struct sh_irda_xir_func sir_func = {
404 .xir_fre = sir_fre,
405 .xir_trov = sir_trov,
406 .xir_9 = sir_tot,
407 .xir_8 = sir_fer,
408 .xir_fte = sir_fte,
409};
410
411static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
412{
413 struct device *dev = &self->ndev->dev;
414 struct sh_irda_xir_func *func;
415 const char *name;
416 u16 data;
417
418 switch (mode) {
419 case SH_IRDA_SIR:
420 name = "SIR";
421 data = TMD_SIR;
422 func = &sir_func;
423 break;
424 case SH_IRDA_MIR:
425 name = "MIR";
426 data = TMD_MIR;
427 func = &mfir_func;
428 break;
429 case SH_IRDA_FIR:
430 name = "FIR";
431 data = TMD_FIR;
432 func = &mfir_func;
433 break;
434 default:
435 name = "NONE";
436 data = 0;
437 func = &xir_func;
438 break;
439 }
440
441 self->mode = mode;
442 self->xir_func = func;
443 sh_irda_update_bits(self, IRTMR, TMD_MASK, data);
444
445 dev_dbg(dev, "switch to %s mode\n", name);
446}
447
448/************************************************************************
449
450
451 irq function
452
453
454************************************************************************/
455static void sh_irda_set_irq_mask(struct sh_irda_self *self)
456{
457 u16 tmr_hole;
458 u16 xir_reg;
459
460 /* set all mask */
461 sh_irda_update_bits(self, IRTMR, xIM_MASK, xIM_MASK);
462 sh_irda_update_bits(self, SIRIMR, xIR_MASK, xIR_MASK);
463 sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);
464
465 /* clear irq */
466 sh_irda_update_bits(self, SIRICR, xIR_MASK, xIR_MASK);
467 sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);
468
469 switch (self->mode) {
470 case SH_IRDA_SIR:
471 tmr_hole = SIM;
472 xir_reg = SIRIMR;
473 break;
474 case SH_IRDA_MIR:
475 case SH_IRDA_FIR:
476 tmr_hole = MIM;
477 xir_reg = MFIRIMR;
478 break;
479 default:
480 tmr_hole = 0;
481 xir_reg = 0;
482 break;
483 }
484
485 /* open mask */
486 if (xir_reg) {
487 sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
488 sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
489 }
490}
491
492static irqreturn_t sh_irda_irq(int irq, void *dev_id)
493{
494 struct sh_irda_self *self = dev_id;
495 struct sh_irda_xir_func *func = self->xir_func;
496 u16 isr = sh_irda_read(self, SIRISR);
497
498 /* clear irq */
499 sh_irda_write(self, SIRICR, isr);
500
501 if (isr & FRE)
502 func->xir_fre(self);
503 if (isr & TROV)
504 func->xir_trov(self);
505 if (isr & xIR_9)
506 func->xir_9(self);
507 if (isr & xIR_8)
508 func->xir_8(self);
509 if (isr & FTE)
510 func->xir_fte(self);
511
512 return IRQ_HANDLED;
513}
514
515/************************************************************************
516
517
518 CRC function
519
520
521************************************************************************/
522static void sh_irda_crc_reset(struct sh_irda_self *self)
523{
524 sh_irda_write(self, CRCCTR, CRC_RST);
525}
526
527static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
528{
529 sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
530}
531
532static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
533{
534 return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
535}
536
537static u16 sh_irda_crc_out(struct sh_irda_self *self)
538{
539 return sh_irda_read(self, CRCOR);
540}
541
542static int sh_irda_crc_init(struct sh_irda_self *self)
543{
544 struct device *dev = &self->ndev->dev;
545 int ret = -EIO;
546 u16 val;
547
548 sh_irda_crc_reset(self);
549
550 sh_irda_crc_add(self, 0xCC);
551 sh_irda_crc_add(self, 0xF5);
552 sh_irda_crc_add(self, 0xF1);
553 sh_irda_crc_add(self, 0xA7);
554
555 val = sh_irda_crc_cnt(self);
556 if (4 != val) {
557 dev_err(dev, "CRC count error %x\n", val);
558 goto crc_init_out;
559 }
560
561 val = sh_irda_crc_out(self);
562 if (0x51DF != val) {
563		dev_err(dev, "CRC result error %x\n", val);
564 goto crc_init_out;
565 }
566
567 ret = 0;
568
569crc_init_out:
570
571 sh_irda_crc_reset(self);
572 return ret;
573}
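
Cross-check (illustration, not part of this patch): the self-test vector above is consistent with the IrDA 16-bit FCS, i.e. CRC-16/CCITT with reflected polynomial 0x8408, preset 0xffff and a ones-complement result. A standalone user-space check:

	#include <assert.h>
	#include <stdint.h>
	#include <stddef.h>

	static uint16_t irda_fcs(const uint8_t *buf, size_t len)
	{
		uint16_t crc = 0xffff;			/* preset */
		size_t i;
		int bit;

		for (i = 0; i < len; i++) {
			crc ^= buf[i];
			for (bit = 0; bit < 8; bit++)	/* reflected poly 0x8408 */
				crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
		}
		return crc ^ 0xffff;			/* ones-complement result */
	}

	int main(void)
	{
		const uint8_t vec[] = { 0xCC, 0xF5, 0xF1, 0xA7 };

		assert(irda_fcs(vec, sizeof(vec)) == 0x51DF);	/* matches sh_irda_crc_init() */
		return 0;
	}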
574
575/************************************************************************
576
577
578 iobuf function
579
580
581************************************************************************/
582static void sh_irda_remove_iobuf(struct sh_irda_self *self)
583{
584 kfree(self->rx_buff.head);
585
586 self->tx_buff.head = NULL;
587 self->tx_buff.data = NULL;
588 self->rx_buff.head = NULL;
589 self->rx_buff.data = NULL;
590}
591
592static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
593{
594 if (self->rx_buff.head ||
595 self->tx_buff.head) {
596		dev_err(&self->ndev->dev, "iobuf already exists\n");
597 return -EINVAL;
598 }
599
600 /* rx_buff */
601 self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
602 if (!self->rx_buff.head)
603 return -ENOMEM;
604
605 self->rx_buff.truesize = rxsize;
606 self->rx_buff.in_frame = FALSE;
607 self->rx_buff.state = OUTSIDE_FRAME;
608 self->rx_buff.data = self->rx_buff.head;
609
610 /* tx_buff */
611 self->tx_buff.head = self->membase + IRDARAM;
612 self->tx_buff.truesize = IRDARAM_LEN;
613
614 return 0;
615}
616
617/************************************************************************
618
619
620 net_device_ops function
621
622
623************************************************************************/
624static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
625{
626 struct sh_irda_self *self = netdev_priv(ndev);
627 struct device *dev = &self->ndev->dev;
628 int speed = irda_get_next_speed(skb);
629 int ret;
630
631 dev_dbg(dev, "hard xmit\n");
632
633 netif_stop_queue(ndev);
634 sh_irda_rcv_ctrl(self, 0);
635
636 ret = sh_irda_set_baudrate(self, speed);
637	if (ret < 0) {
638		dev_kfree_skb(skb);	/* consume the skb even on error */
639		return NETDEV_TX_OK;
640	}
639
640 self->tx_buff.len = 0;
641 if (skb->len) {
642 unsigned long flags;
643
644 spin_lock_irqsave(&self->lock, flags);
645 self->tx_buff.len = async_wrap_skb(skb,
646 self->tx_buff.head,
647 self->tx_buff.truesize);
648 spin_unlock_irqrestore(&self->lock, flags);
649
650 if (self->tx_buff.len > self->tx_buff.truesize)
651 self->tx_buff.len = self->tx_buff.truesize;
652
653 sh_irda_write(self, IRTFLR, self->tx_buff.len);
654 sh_irda_write(self, IRTCTR, ARMOD | TE);
655 }
656
657 dev_kfree_skb(skb);
658
659 return 0;
660}
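
A minimal sketch (illustration, not part of this patch) of the IrLAP async transparency that async_wrap_skb() applies when filling tx_buff; XBOF padding and the trailing FCS are omitted:

	#include <linux/types.h>

	#define XIR_BOF	0xc0	/* begin of frame */
	#define XIR_EOF	0xc1	/* end of frame */
	#define XIR_CE	0x7d	/* control escape */

	static int sir_stuff_example(const u8 *in, int len, u8 *out, int outsize)
	{
		int i, n = 0;

		out[n++] = XIR_BOF;
		for (i = 0; i < len && n < outsize - 2; i++) {
			if (in[i] == XIR_BOF || in[i] == XIR_EOF || in[i] == XIR_CE) {
				out[n++] = XIR_CE;
				out[n++] = in[i] ^ 0x20;	/* complement bit 5 */
			} else {
				out[n++] = in[i];
			}
		}
		out[n++] = XIR_EOF;
		return n;		/* wrapped length, as stored in tx_buff.len above */
	}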
661
662static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
663{
664	/*
665	 * FIXME
666	 *
667	 * This function is required by the IrDA framework,
668	 * but there is nothing to do here yet.
669	 */
670 return 0;
671}
672
673static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
674{
675 struct sh_irda_self *self = netdev_priv(ndev);
676
677 return &self->ndev->stats;
678}
679
680static int sh_irda_open(struct net_device *ndev)
681{
682 struct sh_irda_self *self = netdev_priv(ndev);
683 int err;
684
685 clk_enable(self->clk);
686 err = sh_irda_crc_init(self);
687 if (err)
688 goto open_err;
689
690 sh_irda_set_mode(self, SH_IRDA_SIR);
691 sh_irda_set_timeout(self, 2);
692 sh_irda_set_baudrate(self, 9600);
693
694 self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
695 if (!self->irlap) {
696 err = -ENODEV;
697 goto open_err;
698 }
699
700 netif_start_queue(ndev);
701 sh_irda_rcv_ctrl(self, 1);
702 sh_irda_set_irq_mask(self);
703
704 dev_info(&ndev->dev, "opened\n");
705
706 return 0;
707
708open_err:
709 clk_disable(self->clk);
710
711 return err;
712}
713
714static int sh_irda_stop(struct net_device *ndev)
715{
716 struct sh_irda_self *self = netdev_priv(ndev);
717
718 /* Stop IrLAP */
719 if (self->irlap) {
720 irlap_close(self->irlap);
721 self->irlap = NULL;
722 }
723
724 netif_stop_queue(ndev);
725
726	dev_info(&ndev->dev, "stopped\n");
727
728 return 0;
729}
730
731static const struct net_device_ops sh_irda_ndo = {
732 .ndo_open = sh_irda_open,
733 .ndo_stop = sh_irda_stop,
734 .ndo_start_xmit = sh_irda_hard_xmit,
735 .ndo_do_ioctl = sh_irda_ioctl,
736 .ndo_get_stats = sh_irda_stats,
737};
738
739/************************************************************************
740
741
742 platform_driver function
743
744
745************************************************************************/
746static int __devinit sh_irda_probe(struct platform_device *pdev)
747{
748 struct net_device *ndev;
749 struct sh_irda_self *self;
750 struct resource *res;
751 char clk_name[8];
752	int irq;	/* platform_get_irq() returns a signed error code */
753 int err = -ENOMEM;
754
755 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
756 irq = platform_get_irq(pdev, 0);
757 if (!res || irq < 0) {
758 dev_err(&pdev->dev, "Not enough platform resources.\n");
759 goto exit;
760 }
761
762 ndev = alloc_irdadev(sizeof(*self));
763 if (!ndev)
764 goto exit;
765
766 self = netdev_priv(ndev);
767 self->membase = ioremap_nocache(res->start, resource_size(res));
768 if (!self->membase) {
769 err = -ENXIO;
770 dev_err(&pdev->dev, "Unable to ioremap.\n");
771 goto err_mem_1;
772 }
773
774 err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
775 if (err)
776 goto err_mem_2;
777
778 snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
779 self->clk = clk_get(&pdev->dev, clk_name);
780 if (IS_ERR(self->clk)) {
781 dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
782 goto err_mem_3;
783 }
784
785 irda_init_max_qos_capabilies(&self->qos);
786
787 ndev->netdev_ops = &sh_irda_ndo;
788 ndev->irq = irq;
789
790 self->ndev = ndev;
791 self->qos.baud_rate.bits &= IR_9600; /* FIXME */
792 self->qos.min_turn_time.bits = 1; /* 10 ms or more */
793 spin_lock_init(&self->lock);
794
795 irda_qos_bits_to_value(&self->qos);
796
797 err = register_netdev(ndev);
798 if (err)
799 goto err_mem_4;
800
801 platform_set_drvdata(pdev, ndev);
802
803	err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
804	if (err) {
805		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
806		goto err_irq;
807	}
807
808 dev_info(&pdev->dev, "SuperH IrDA probed\n");
809
810 goto exit;
811
812err_irq:
813	unregister_netdev(ndev);
814err_mem_4:
813 clk_put(self->clk);
814err_mem_3:
815 sh_irda_remove_iobuf(self);
816err_mem_2:
817 iounmap(self->membase);
818err_mem_1:
819 free_netdev(ndev);
820exit:
821 return err;
822}
823
824static int __devexit sh_irda_remove(struct platform_device *pdev)
825{
826 struct net_device *ndev = platform_get_drvdata(pdev);
827 struct sh_irda_self *self = netdev_priv(ndev);
828
829 if (!self)
830 return 0;
831
832 unregister_netdev(ndev);
833 clk_put(self->clk);
834 sh_irda_remove_iobuf(self);
835 iounmap(self->membase);
836 free_netdev(ndev);
837 platform_set_drvdata(pdev, NULL);
838
839 return 0;
840}
841
842static struct platform_driver sh_irda_driver = {
843 .probe = sh_irda_probe,
844 .remove = __devexit_p(sh_irda_remove),
845 .driver = {
846 .name = DRIVER_NAME,
847 },
848};
849
850static int __init sh_irda_init(void)
851{
852 return platform_driver_register(&sh_irda_driver);
853}
854
855static void __exit sh_irda_exit(void)
856{
857 platform_driver_unregister(&sh_irda_driver);
858}
859
860module_init(sh_irda_init);
861module_exit(sh_irda_exit);
862
863MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
864MODULE_DESCRIPTION("SuperH IrDA driver");
865MODULE_LICENSE("GPL");
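
A minimal sketch (illustration, not part of this patch) of how board code would bind this driver, assuming DRIVER_NAME is "sh_irda" as the request_irq() string above suggests; the base address, size and IRQ below are placeholders, not real SoC values:

	static struct resource sh_irda_resources[] = {
		[0] = {
			.start	= 0xe6c00000,		/* hypothetical register base */
			.end	= 0xe6c00000 + 0x1ff,
			.flags	= IORESOURCE_MEM,
		},
		[1] = {
			.start	= 20,			/* hypothetical IRQ number */
			.end	= 20,
			.flags	= IORESOURCE_IRQ,
		},
	};

	static struct platform_device sh_irda_device = {
		.name		= "sh_irda",		/* must match DRIVER_NAME */
		.id		= 0,			/* probe derives clock name "irda0" */
		.num_resources	= ARRAY_SIZE(sh_irda_resources),
		.resource	= sh_irda_resources,
	};

	/* board init: platform_device_register(&sh_irda_device); */
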
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 0745581c4b5e..5c5f99d50341 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -646,8 +646,10 @@ static int sh_sir_open(struct net_device *ndev)
646 sh_sir_set_baudrate(self, 9600); 646 sh_sir_set_baudrate(self, 9600);
647 647
648 self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME); 648 self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
649 if (!self->irlap) 649 if (!self->irlap) {
650 err = -ENODEV;
650 goto open_err; 651 goto open_err;
652 }
651 653
652 /* 654 /*
653 * Now enable the interrupt then start the queue 655 * Now enable the interrupt then start the queue
@@ -707,7 +709,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
707 struct sh_sir_self *self; 709 struct sh_sir_self *self;
708 struct resource *res; 710 struct resource *res;
709 char clk_name[8]; 711 char clk_name[8];
710 void __iomem *base;
711 unsigned int irq; 712 unsigned int irq;
712 int err = -ENOMEM; 713 int err = -ENOMEM;
713 714
@@ -722,14 +723,14 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
722 if (!ndev) 723 if (!ndev)
723 goto exit; 724 goto exit;
724 725
725 base = ioremap_nocache(res->start, resource_size(res)); 726 self = netdev_priv(ndev);
726 if (!base) { 727 self->membase = ioremap_nocache(res->start, resource_size(res));
728 if (!self->membase) {
727 err = -ENXIO; 729 err = -ENXIO;
728 dev_err(&pdev->dev, "Unable to ioremap.\n"); 730 dev_err(&pdev->dev, "Unable to ioremap.\n");
729 goto err_mem_1; 731 goto err_mem_1;
730 } 732 }
731 733
732 self = netdev_priv(ndev);
733 err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME); 734 err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
734 if (err) 735 if (err)
735 goto err_mem_2; 736 goto err_mem_2;
@@ -746,7 +747,6 @@ static int __devinit sh_sir_probe(struct platform_device *pdev)
746 ndev->netdev_ops = &sh_sir_ndo; 747 ndev->netdev_ops = &sh_sir_ndo;
747 ndev->irq = irq; 748 ndev->irq = irq;
748 749
749 self->membase = base;
750 self->ndev = ndev; 750 self->ndev = ndev;
751 self->qos.baud_rate.bits &= IR_9600; /* FIXME */ 751 self->qos.baud_rate.bits &= IR_9600; /* FIXME */
752 self->qos.min_turn_time.bits = 1; /* 10 ms or more */ 752 self->qos.min_turn_time.bits = 1; /* 10 ms or more */
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 209d4bcfaced..e14505272870 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -1742,7 +1742,7 @@ static int vlsi_irda_suspend(struct pci_dev *pdev, pm_message_t state)
1742 vlsi_irda_dev_t *idev; 1742 vlsi_irda_dev_t *idev;
1743 1743
1744 if (!ndev) { 1744 if (!ndev) {
1745 IRDA_ERROR("%s - %s: no netdevice \n", 1745 IRDA_ERROR("%s - %s: no netdevice\n",
1746 __func__, pci_name(pdev)); 1746 __func__, pci_name(pdev));
1747 return 0; 1747 return 0;
1748 } 1748 }
@@ -1781,7 +1781,7 @@ static int vlsi_irda_resume(struct pci_dev *pdev)
1781 vlsi_irda_dev_t *idev; 1781 vlsi_irda_dev_t *idev;
1782 1782
1783 if (!ndev) { 1783 if (!ndev) {
1784 IRDA_ERROR("%s - %s: no netdevice \n", 1784 IRDA_ERROR("%s - %s: no netdevice\n",
1785 __func__, pci_name(pdev)); 1785 __func__, pci_name(pdev));
1786 return 0; 1786 return 0;
1787 } 1787 }
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 773c59c89691..ba1de5973fb2 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -962,15 +962,15 @@ static void veth_set_multicast_list(struct net_device *dev)
962 (netdev_mc_count(dev) > VETH_MAX_MCAST)) { 962 (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
963 port->promiscuous = 1; 963 port->promiscuous = 1;
964 } else { 964 } else {
965 struct dev_mc_list *dmi; 965 struct netdev_hw_addr *ha;
966 966
967 port->promiscuous = 0; 967 port->promiscuous = 0;
968 968
969 /* Update table */ 969 /* Update table */
970 port->num_mcast = 0; 970 port->num_mcast = 0;
971 971
972 netdev_for_each_mc_addr(dmi, dev) { 972 netdev_for_each_mc_addr(ha, dev) {
973 u8 *addr = dmi->dmi_addr; 973 u8 *addr = ha->addr;
974 u64 xaddr = 0; 974 u64 xaddr = 0;
975 975
976 if (addr[0] & 0x01) {/* multicast address? */ 976 if (addr[0] & 0x01) {/* multicast address? */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index c9fef65cb98b..912dd1d5772c 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1058,7 +1058,7 @@ ixgb_set_multi(struct net_device *netdev)
1058{ 1058{
1059 struct ixgb_adapter *adapter = netdev_priv(netdev); 1059 struct ixgb_adapter *adapter = netdev_priv(netdev);
1060 struct ixgb_hw *hw = &adapter->hw; 1060 struct ixgb_hw *hw = &adapter->hw;
1061 struct dev_mc_list *mc_ptr; 1061 struct netdev_hw_addr *ha;
1062 u32 rctl; 1062 u32 rctl;
1063 int i; 1063 int i;
1064 1064
@@ -1089,9 +1089,9 @@ ixgb_set_multi(struct net_device *netdev)
1089 IXGB_WRITE_REG(hw, RCTL, rctl); 1089 IXGB_WRITE_REG(hw, RCTL, rctl);
1090 1090
1091 i = 0; 1091 i = 0;
1092 netdev_for_each_mc_addr(mc_ptr, netdev) 1092 netdev_for_each_mc_addr(ha, netdev)
1093 memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS], 1093 memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
1094 mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS); 1094 ha->addr, IXGB_ETH_LENGTH_OF_ADDRESS);
1095 1095
1096 ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0); 1096 ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
1097 } 1097 }
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b405a00817c6..f894bb633040 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -1269,7 +1269,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1269 } 1269 }
1270 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1270 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1271 hw_dbg(hw ,"Flow Director previous command isn't complete, " 1271 hw_dbg(hw ,"Flow Director previous command isn't complete, "
1272 "aborting table re-initialization. \n"); 1272 "aborting table re-initialization.\n");
1273 return IXGBE_ERR_FDIR_REINIT_FAILED; 1273 return IXGBE_ERR_FDIR_REINIT_FAILED;
1274 } 1274 }
1275 1275
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index eb49020903c1..6eb5814ca7da 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -1484,26 +1484,24 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr)
1484/** 1484/**
1485 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses 1485 * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses
1486 * @hw: pointer to hardware structure 1486 * @hw: pointer to hardware structure
1487 * @mc_addr_list: the list of new multicast addresses 1487 * @netdev: pointer to net device structure
1488 * @mc_addr_count: number of addresses
1489 * @next: iterator function to walk the multicast address list
1490 * 1488 *
1491 * The given list replaces any existing list. Clears the MC addrs from receive 1489 * The given list replaces any existing list. Clears the MC addrs from receive
1492 * address registers and the multicast table. Uses unused receive address 1490 * address registers and the multicast table. Uses unused receive address
1493 * registers for the first multicast addresses, and hashes the rest into the 1491 * registers for the first multicast addresses, and hashes the rest into the
1494 * multicast table. 1492 * multicast table.
1495 **/ 1493 **/
1496s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, 1494s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
1497 u32 mc_addr_count, ixgbe_mc_addr_itr next) 1495 struct net_device *netdev)
1498{ 1496{
1497 struct netdev_hw_addr *ha;
1499 u32 i; 1498 u32 i;
1500 u32 vmdq;
1501 1499
1502 /* 1500 /*
1503 * Set the new number of MC addresses that we are being requested to 1501 * Set the new number of MC addresses that we are being requested to
1504 * use. 1502 * use.
1505 */ 1503 */
1506 hw->addr_ctrl.num_mc_addrs = mc_addr_count; 1504 hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
1507 hw->addr_ctrl.mta_in_use = 0; 1505 hw->addr_ctrl.mta_in_use = 0;
1508 1506
1509 /* Clear the MTA */ 1507 /* Clear the MTA */
@@ -1512,9 +1510,9 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
1512 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1510 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0);
1513 1511
1514 /* Add the new addresses */ 1512 /* Add the new addresses */
1515 for (i = 0; i < mc_addr_count; i++) { 1513 netdev_for_each_mc_addr(ha, netdev) {
1516 hw_dbg(hw, " Adding the multicast addresses:\n"); 1514 hw_dbg(hw, " Adding the multicast addresses:\n");
1517 ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); 1515 ixgbe_set_mta(hw, ha->addr);
1518 } 1516 }
1519 1517
1520 /* Enable mta */ 1518 /* Enable mta */
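
A minimal sketch (illustration, not part of this patch) of the netdev_hw_addr pattern these multicast hunks convert to, replacing the old dev_mc_list walk and driver-supplied iterator callbacks such as ixgbe_addr_list_itr(); my_hw_set_filter() is a hypothetical device hook:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	static void my_hw_set_filter(const u8 *addr)	/* hypothetical hw hook */
	{
		pr_debug("mc filter %pM\n", addr);
	}

	static void example_set_rx_mode(struct net_device *netdev)
	{
		struct netdev_hw_addr *ha;

		/* the addr list is stable here: ndo_set_rx_mode runs under netif_addr_lock */
		netdev_for_each_mc_addr(ha, netdev)
			my_hw_set_filter(ha->addr);	/* ha->addr is ETH_ALEN bytes */
	}
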
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 13606d4809c9..264eef575cd6 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -56,9 +56,8 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
56 u32 enable_addr); 56 u32 enable_addr);
57s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); 57s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
58s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); 58s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
59s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, 59s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
60 u32 mc_addr_count, 60 struct net_device *netdev);
61 ixgbe_mc_addr_itr func);
62s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, 61s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
63 struct net_device *netdev); 62 struct net_device *netdev);
64s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 63s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 8f677cb86290..a98ff0e76e86 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -2482,12 +2482,74 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2482 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false); 2482 hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
2483} 2483}
2484 2484
2485/**
2486 * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
2487 * @adapter: driver data
2488 */
2489static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
2490{
2491 struct ixgbe_hw *hw = &adapter->hw;
2492 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2493 int i, j;
2494
2495 switch (hw->mac.type) {
2496 case ixgbe_mac_82598EB:
2497 vlnctrl &= ~(IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE);
2498 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2499 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2500 break;
2501 case ixgbe_mac_82599EB:
2502 vlnctrl &= ~IXGBE_VLNCTRL_VFE;
2503 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2504 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2505 for (i = 0; i < adapter->num_rx_queues; i++) {
2506 j = adapter->rx_ring[i]->reg_idx;
2507 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2508 vlnctrl &= ~IXGBE_RXDCTL_VME;
2509 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2510 }
2511 break;
2512 default:
2513 break;
2514 }
2515}
2516
2517/**
2518 * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
2519 * @adapter: driver data
2520 */
2521static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
2522{
2523 struct ixgbe_hw *hw = &adapter->hw;
2524 u32 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2525 int i, j;
2526
2527 switch (hw->mac.type) {
2528 case ixgbe_mac_82598EB:
2529 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2530 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2531 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2532 break;
2533 case ixgbe_mac_82599EB:
2534 vlnctrl |= IXGBE_VLNCTRL_VFE;
2535 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2536 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2537 for (i = 0; i < adapter->num_rx_queues; i++) {
2538 j = adapter->rx_ring[i]->reg_idx;
2539 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2540 vlnctrl |= IXGBE_RXDCTL_VME;
2541 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2542 }
2543 break;
2544 default:
2545 break;
2546 }
2547}
2548
2485static void ixgbe_vlan_rx_register(struct net_device *netdev, 2549static void ixgbe_vlan_rx_register(struct net_device *netdev,
2486 struct vlan_group *grp) 2550 struct vlan_group *grp)
2487{ 2551{
2488 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2552 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2489 u32 ctrl;
2490 int i, j;
2491 2553
2492 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2554 if (!test_bit(__IXGBE_DOWN, &adapter->state))
2493 ixgbe_irq_disable(adapter); 2555 ixgbe_irq_disable(adapter);
@@ -2498,25 +2560,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
2498 * still receive traffic from a DCB-enabled host even if we're 2560 * still receive traffic from a DCB-enabled host even if we're
2499 * not in DCB mode. 2561 * not in DCB mode.
2500 */ 2562 */
2501 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL); 2563 ixgbe_vlan_filter_enable(adapter);
2502
2503 /* Disable CFI check */
2504 ctrl &= ~IXGBE_VLNCTRL_CFIEN;
2505
2506 /* enable VLAN tag stripping */
2507 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
2508 ctrl |= IXGBE_VLNCTRL_VME;
2509 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
2510 for (i = 0; i < adapter->num_rx_queues; i++) {
2511 u32 ctrl;
2512 j = adapter->rx_ring[i]->reg_idx;
2513 ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
2514 ctrl |= IXGBE_RXDCTL_VME;
2515 IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
2516 }
2517 }
2518
2519 IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
2520 2564
2521 ixgbe_vlan_rx_add_vid(netdev, 0); 2565 ixgbe_vlan_rx_add_vid(netdev, 0);
2522 2566
@@ -2538,21 +2582,6 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
2538 } 2582 }
2539} 2583}
2540 2584
2541static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
2542{
2543 struct dev_mc_list *mc_ptr;
2544 u8 *addr = *mc_addr_ptr;
2545 *vmdq = 0;
2546
2547 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
2548 if (mc_ptr->next)
2549 *mc_addr_ptr = mc_ptr->next->dmi_addr;
2550 else
2551 *mc_addr_ptr = NULL;
2552
2553 return addr;
2554}
2555
2556/** 2585/**
2557 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set 2586 * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
2558 * @netdev: network interface device structure 2587 * @netdev: network interface device structure
@@ -2566,19 +2595,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
2566{ 2595{
2567 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2596 struct ixgbe_adapter *adapter = netdev_priv(netdev);
2568 struct ixgbe_hw *hw = &adapter->hw; 2597 struct ixgbe_hw *hw = &adapter->hw;
2569 u32 fctrl, vlnctrl; 2598 u32 fctrl;
2570 u8 *addr_list = NULL;
2571 int addr_count = 0;
2572 2599
2573 /* Check for Promiscuous and All Multicast modes */ 2600 /* Check for Promiscuous and All Multicast modes */
2574 2601
2575 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 2602 fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
2576 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
2577 2603
2578 if (netdev->flags & IFF_PROMISC) { 2604 if (netdev->flags & IFF_PROMISC) {
2579 hw->addr_ctrl.user_set_promisc = 1; 2605 hw->addr_ctrl.user_set_promisc = 1;
2580 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2606 fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2581 vlnctrl &= ~IXGBE_VLNCTRL_VFE; 2607 /* don't hardware filter vlans in promisc mode */
2608 ixgbe_vlan_filter_disable(adapter);
2582 } else { 2609 } else {
2583 if (netdev->flags & IFF_ALLMULTI) { 2610 if (netdev->flags & IFF_ALLMULTI) {
2584 fctrl |= IXGBE_FCTRL_MPE; 2611 fctrl |= IXGBE_FCTRL_MPE;
@@ -2586,22 +2613,18 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
2586 } else { 2613 } else {
2587 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); 2614 fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
2588 } 2615 }
2589 vlnctrl |= IXGBE_VLNCTRL_VFE; 2616 ixgbe_vlan_filter_enable(adapter);
2590 hw->addr_ctrl.user_set_promisc = 0; 2617 hw->addr_ctrl.user_set_promisc = 0;
2591 } 2618 }
2592 2619
2593 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); 2620 IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
2594 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2595 2621
2596 /* reprogram secondary unicast list */ 2622 /* reprogram secondary unicast list */
2597 hw->mac.ops.update_uc_addr_list(hw, netdev); 2623 hw->mac.ops.update_uc_addr_list(hw, netdev);
2598 2624
2599 /* reprogram multicast list */ 2625 /* reprogram multicast list */
2600 addr_count = netdev_mc_count(netdev); 2626 hw->mac.ops.update_mc_addr_list(hw, netdev);
2601 if (addr_count) 2627
2602 addr_list = netdev->mc_list->dmi_addr;
2603 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
2604 ixgbe_addr_list_itr);
2605 if (adapter->num_vfs) 2628 if (adapter->num_vfs)
2606 ixgbe_restore_vf_multicasts(adapter); 2629 ixgbe_restore_vf_multicasts(adapter);
2607} 2630}
@@ -2661,7 +2684,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
2661static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) 2684static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2662{ 2685{
2663 struct ixgbe_hw *hw = &adapter->hw; 2686 struct ixgbe_hw *hw = &adapter->hw;
2664 u32 txdctl, vlnctrl; 2687 u32 txdctl;
2665 int i, j; 2688 int i, j;
2666 2689
2667 ixgbe_dcb_check_config(&adapter->dcb_cfg); 2690 ixgbe_dcb_check_config(&adapter->dcb_cfg);
@@ -2679,22 +2702,8 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
2679 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl); 2702 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
2680 } 2703 }
2681 /* Enable VLAN tag insert/strip */ 2704 /* Enable VLAN tag insert/strip */
2682 vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 2705 ixgbe_vlan_filter_enable(adapter);
2683 if (hw->mac.type == ixgbe_mac_82598EB) { 2706
2684 vlnctrl |= IXGBE_VLNCTRL_VME | IXGBE_VLNCTRL_VFE;
2685 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2686 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2687 } else if (hw->mac.type == ixgbe_mac_82599EB) {
2688 vlnctrl |= IXGBE_VLNCTRL_VFE;
2689 vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
2690 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
2691 for (i = 0; i < adapter->num_rx_queues; i++) {
2692 j = adapter->rx_ring[i]->reg_idx;
2693 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
2694 vlnctrl |= IXGBE_RXDCTL_VME;
2695 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
2696 }
2697 }
2698 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 2707 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
2699} 2708}
2700 2709
@@ -3471,12 +3480,12 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
3471 adapter->num_tx_queues = 1; 3480 adapter->num_tx_queues = 1;
3472#ifdef CONFIG_IXGBE_DCB 3481#ifdef CONFIG_IXGBE_DCB
3473 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 3482 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
3474 DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n"); 3483 DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
3475 ixgbe_set_dcb_queues(adapter); 3484 ixgbe_set_dcb_queues(adapter);
3476 } 3485 }
3477#endif 3486#endif
3478 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 3487 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
3479 DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n"); 3488 DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
3480 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 3489 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
3481 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 3490 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
3482 ixgbe_set_fdir_queues(adapter); 3491 ixgbe_set_fdir_queues(adapter);
@@ -5092,7 +5101,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
5092 &(adapter->tx_ring[i]->reinit_state)); 5101 &(adapter->tx_ring[i]->reinit_state));
5093 } else { 5102 } else {
5094 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, " 5103 DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
5095 "ignored adding FDIR ATR filters \n"); 5104 "ignored adding FDIR ATR filters\n");
5096 } 5105 }
5097 /* Done FDIR Re-initialization, enable transmits */ 5106 /* Done FDIR Re-initialization, enable transmits */
5098 netif_tx_start_all_queues(adapter->netdev); 5107 netif_tx_start_all_queues(adapter->netdev);
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 1c1efd386956..d6d5b843d625 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -475,7 +475,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
475 msleep(edata); 475 msleep(edata);
476 break; 476 break;
477 case IXGBE_DATA_NL: 477 case IXGBE_DATA_NL:
478 hw_dbg(hw, "DATA: \n"); 478 hw_dbg(hw, "DATA:\n");
479 data_offset++; 479 data_offset++;
480 hw->eeprom.ops.read(hw, data_offset++, 480 hw->eeprom.ops.read(hw, data_offset++,
481 &phy_offset); 481 &phy_offset);
@@ -491,7 +491,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
491 break; 491 break;
492 case IXGBE_CONTROL_NL: 492 case IXGBE_CONTROL_NL:
493 data_offset++; 493 data_offset++;
494 hw_dbg(hw, "CONTROL: \n"); 494 hw_dbg(hw, "CONTROL:\n");
495 if (edata == IXGBE_CONTROL_EOL_NL) { 495 if (edata == IXGBE_CONTROL_EOL_NL) {
496 hw_dbg(hw, "EOL\n"); 496 hw_dbg(hw, "EOL\n");
497 end_data = true; 497 end_data = true;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 4ec6dc1a5b75..aed4ed665648 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -2417,8 +2417,7 @@ struct ixgbe_mac_operations {
2417 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 2417 s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
2418 s32 (*init_rx_addrs)(struct ixgbe_hw *); 2418 s32 (*init_rx_addrs)(struct ixgbe_hw *);
2419 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *); 2419 s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
2420 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 2420 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
2421 ixgbe_mc_addr_itr);
2422 s32 (*enable_mc)(struct ixgbe_hw *); 2421 s32 (*enable_mc)(struct ixgbe_hw *);
2423 s32 (*disable_mc)(struct ixgbe_hw *); 2422 s32 (*disable_mc)(struct ixgbe_hw *);
2424 s32 (*clear_vfta)(struct ixgbe_hw *); 2423 s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 0cd6202dfacc..f484161418b6 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -604,14 +604,13 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
604 * packets not getting split correctly 604 * packets not getting split correctly
605 */ 605 */
606 if (staterr & IXGBE_RXD_STAT_LB) { 606 if (staterr & IXGBE_RXD_STAT_LB) {
607 u32 header_fixup_len = skb->len - skb->data_len; 607 u32 header_fixup_len = skb_headlen(skb);
608 if (header_fixup_len < 14) 608 if (header_fixup_len < 14)
609 skb_push(skb, header_fixup_len); 609 skb_push(skb, header_fixup_len);
610 } 610 }
611 skb->protocol = eth_type_trans(skb, adapter->netdev); 611 skb->protocol = eth_type_trans(skb, adapter->netdev);
612 612
613 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc); 613 ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
614 adapter->netdev->last_rx = jiffies;
615 614
616next_desc: 615next_desc:
617 rx_desc->wb.upper.status_error = 0; 616 rx_desc->wb.upper.status_error = 0;
@@ -1496,22 +1495,6 @@ static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
1496 } 1495 }
1497} 1496}
1498 1497
1499static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
1500 u32 *vmdq)
1501{
1502 struct dev_mc_list *mc_ptr;
1503 u8 *addr = *mc_addr_ptr;
1504 *vmdq = 0;
1505
1506 mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
1507 if (mc_ptr->next)
1508 *mc_addr_ptr = mc_ptr->next->dmi_addr;
1509 else
1510 *mc_addr_ptr = NULL;
1511
1512 return addr;
1513}
1514
1515/** 1498/**
1516 * ixgbevf_set_rx_mode - Multicast set 1499 * ixgbevf_set_rx_mode - Multicast set
1517 * @netdev: network interface device structure 1500 * @netdev: network interface device structure
@@ -1524,16 +1507,10 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
1524{ 1507{
1525 struct ixgbevf_adapter *adapter = netdev_priv(netdev); 1508 struct ixgbevf_adapter *adapter = netdev_priv(netdev);
1526 struct ixgbe_hw *hw = &adapter->hw; 1509 struct ixgbe_hw *hw = &adapter->hw;
1527 u8 *addr_list = NULL;
1528 int addr_count = 0;
1529 1510
1530 /* reprogram multicast list */ 1511 /* reprogram multicast list */
1531 addr_count = netdev_mc_count(netdev);
1532 if (addr_count)
1533 addr_list = netdev->mc_list->dmi_addr;
1534 if (hw->mac.ops.update_mc_addr_list) 1512 if (hw->mac.ops.update_mc_addr_list)
1535 hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, 1513 hw->mac.ops.update_mc_addr_list(hw, netdev);
1536 ixgbevf_addr_list_itr);
1537} 1514}
1538 1515
1539static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter) 1516static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@ -2418,9 +2395,9 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
2418 2395
2419 if (link_up) { 2396 if (link_up) {
2420 if (!netif_carrier_ok(netdev)) { 2397 if (!netif_carrier_ok(netdev)) {
2421 hw_dbg(&adapter->hw, "NIC Link is Up %s, ", 2398 hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
2422 ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 2399 (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
2423 "10 Gbps\n" : "1 Gbps\n")); 2400 10 : 1);
2424 netif_carrier_on(netdev); 2401 netif_carrier_on(netdev);
2425 netif_tx_wake_all_queues(netdev); 2402 netif_tx_wake_all_queues(netdev);
2426 } else { 2403 } else {
@@ -3482,7 +3459,7 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3482 3459
3483 hw_dbg(hw, "MAC: %d\n", hw->mac.type); 3460 hw_dbg(hw, "MAC: %d\n", hw->mac.type);
3484 3461
3485 hw_dbg(hw, "LRO is disabled \n"); 3462 hw_dbg(hw, "LRO is disabled\n");
3486 3463
3487 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n"); 3464 hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
3488 cards_found++; 3465 cards_found++;
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index 4b5dec0ec140..852e9c4fd934 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -252,22 +252,18 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
252/** 252/**
253 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses 253 * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
254 * @hw: pointer to the HW structure 254 * @hw: pointer to the HW structure
255 * @mc_addr_list: array of multicast addresses to program 255 * @netdev: pointer to net device structure
256 * @mc_addr_count: number of multicast addresses to program
257 * @next: caller supplied function to return next address in list
258 * 256 *
259 * Updates the Multicast Table Array. 257 * Updates the Multicast Table Array.
260 **/ 258 **/
261static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, 259static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
262 u32 mc_addr_count, 260 struct net_device *netdev)
263 ixgbe_mc_addr_itr next)
264{ 261{
262 struct netdev_hw_addr *ha;
265 struct ixgbe_mbx_info *mbx = &hw->mbx; 263 struct ixgbe_mbx_info *mbx = &hw->mbx;
266 u32 msgbuf[IXGBE_VFMAILBOX_SIZE]; 264 u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
267 u16 *vector_list = (u16 *)&msgbuf[1]; 265 u16 *vector_list = (u16 *)&msgbuf[1];
268 u32 vector;
269 u32 cnt, i; 266 u32 cnt, i;
270 u32 vmdq;
271 267
272 /* Each entry in the list uses 1 16 bit word. We have 30 268 /* Each entry in the list uses 1 16 bit word. We have 30
273 * 16 bit words available in our HW msg buffer (minus 1 for the 269 * 16 bit words available in our HW msg buffer (minus 1 for the
@@ -278,13 +274,17 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
278 * addresses except for in large enterprise network environments. 274 * addresses except for in large enterprise network environments.
279 */ 275 */
280 276
281 cnt = (mc_addr_count > 30) ? 30 : mc_addr_count; 277 cnt = netdev_mc_count(netdev);
278 if (cnt > 30)
279 cnt = 30;
282 msgbuf[0] = IXGBE_VF_SET_MULTICAST; 280 msgbuf[0] = IXGBE_VF_SET_MULTICAST;
283 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT; 281 msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
284 282
285 for (i = 0; i < cnt; i++) { 283 i = 0;
286 vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq)); 284 netdev_for_each_mc_addr(ha, netdev) {
287 vector_list[i] = vector; 285 if (i == cnt)
286 break;
287 vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
288 } 288 }
289 289
290 mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE); 290 mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 1f31b052d4b4..94b750b8874f 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -32,6 +32,7 @@
32#include <linux/delay.h> 32#include <linux/delay.h>
33#include <linux/interrupt.h> 33#include <linux/interrupt.h>
34#include <linux/if_ether.h> 34#include <linux/if_ether.h>
35#include <linux/netdevice.h>
35 36
36#include "defines.h" 37#include "defines.h"
37#include "regs.h" 38#include "regs.h"
@@ -62,8 +63,7 @@ struct ixgbe_mac_operations {
62 /* RAR, Multicast, VLAN */ 63 /* RAR, Multicast, VLAN */
63 s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32); 64 s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
64 s32 (*init_rx_addrs)(struct ixgbe_hw *); 65 s32 (*init_rx_addrs)(struct ixgbe_hw *);
65 s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 66 s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
66 ixgbe_mc_addr_itr);
67 s32 (*enable_mc)(struct ixgbe_hw *); 67 s32 (*enable_mc)(struct ixgbe_hw *);
68 s32 (*disable_mc)(struct ixgbe_hw *); 68 s32 (*disable_mc)(struct ixgbe_hw *);
69 s32 (*clear_vfta)(struct ixgbe_hw *); 69 s32 (*clear_vfta)(struct ixgbe_hw *);
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index b705ad3a53a7..4e868eeac89e 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2010,12 +2010,12 @@ jme_set_multi(struct net_device *netdev)
2010 } else if (netdev->flags & IFF_ALLMULTI) { 2010 } else if (netdev->flags & IFF_ALLMULTI) {
2011 jme->reg_rxmcs |= RXMCS_ALLMULFRAME; 2011 jme->reg_rxmcs |= RXMCS_ALLMULFRAME;
2012 } else if (netdev->flags & IFF_MULTICAST) { 2012 } else if (netdev->flags & IFF_MULTICAST) {
2013 struct dev_mc_list *mclist; 2013 struct netdev_hw_addr *ha;
2014 int bit_nr; 2014 int bit_nr;
2015 2015
2016 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED; 2016 jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
2017 netdev_for_each_mc_addr(mclist, netdev) { 2017 netdev_for_each_mc_addr(ha, netdev) {
2018 bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F; 2018 bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3F;
2019 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F); 2019 mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
2020 } 2020 }
2021 2021
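
Worked example (illustration, not part of this patch) of the hash indexing in jme_set_multi() above: the low six CRC bits select one of 64 filter bits split across two 32-bit words, e.g. bit_nr 0x2B sets bit 11 of mc_hash[1]:

	static void example_mc_hash_set(u32 mc_hash[2], int bit_nr)
	{
		mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);	/* word index, then bit */
	}
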
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 300c2249812d..26bf1b76b997 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev)
482{ 482{
483 struct korina_private *lp = netdev_priv(dev); 483 struct korina_private *lp = netdev_priv(dev);
484 unsigned long flags; 484 unsigned long flags;
485 struct dev_mc_list *dmi; 485 struct netdev_hw_addr *ha;
486 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */ 486 u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
487 int i; 487 int i;
488 488
@@ -502,8 +502,8 @@ static void korina_multicast_list(struct net_device *dev)
502 for (i = 0; i < 4; i++) 502 for (i = 0; i < 4; i++)
503 hash_table[i] = 0; 503 hash_table[i] = 0;
504 504
505 netdev_for_each_mc_addr(dmi, dev) { 505 netdev_for_each_mc_addr(ha, dev) {
506 char *addrs = dmi->dmi_addr; 506 char *addrs = ha->addr;
507 507
508 if (!(*addrs & 1)) 508 if (!(*addrs & 1))
509 continue; 509 continue;
@@ -1135,7 +1135,7 @@ static int korina_probe(struct platform_device *pdev)
1135 1135
1136 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs"); 1136 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_regs");
1137 dev->base_addr = r->start; 1137 dev->base_addr = r->start;
1138 lp->eth_regs = ioremap_nocache(r->start, r->end - r->start); 1138 lp->eth_regs = ioremap_nocache(r->start, resource_size(r));
1139 if (!lp->eth_regs) { 1139 if (!lp->eth_regs) {
1140 printk(KERN_ERR DRV_NAME ": cannot remap registers\n"); 1140 printk(KERN_ERR DRV_NAME ": cannot remap registers\n");
1141 rc = -ENXIO; 1141 rc = -ENXIO;
@@ -1143,7 +1143,7 @@ static int korina_probe(struct platform_device *pdev)
1143 } 1143 }
1144 1144
1145 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx"); 1145 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_rx");
1146 lp->rx_dma_regs = ioremap_nocache(r->start, r->end - r->start); 1146 lp->rx_dma_regs = ioremap_nocache(r->start, resource_size(r));
1147 if (!lp->rx_dma_regs) { 1147 if (!lp->rx_dma_regs) {
1148 printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n"); 1148 printk(KERN_ERR DRV_NAME ": cannot remap Rx DMA registers\n");
1149 rc = -ENXIO; 1149 rc = -ENXIO;
@@ -1151,7 +1151,7 @@ static int korina_probe(struct platform_device *pdev)
1151 } 1151 }
1152 1152
1153 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx"); 1153 r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "korina_dma_tx");
1154 lp->tx_dma_regs = ioremap_nocache(r->start, r->end - r->start); 1154 lp->tx_dma_regs = ioremap_nocache(r->start, resource_size(r));
1155 if (!lp->tx_dma_regs) { 1155 if (!lp->tx_dma_regs) {
1156 printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n"); 1156 printk(KERN_ERR DRV_NAME ": cannot remap Tx DMA registers\n");
1157 rc = -ENXIO; 1157 rc = -ENXIO;
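
Note on the three korina hunks above: struct resource ranges are inclusive, so the old "r->end - r->start" mapped one byte short. resource_size() from <linux/ioport.h> computes the inclusive size:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;	/* inclusive [start, end] */
	}
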
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 5c45cb58d023..b91492f4e48a 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -20,6 +20,8 @@
20 * The Micrel KS8842 behind the timberdale FPGA 20 * The Micrel KS8842 behind the timberdale FPGA
21 */ 21 */
22 22
23#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
24
23#include <linux/kernel.h> 25#include <linux/kernel.h>
24#include <linux/module.h> 26#include <linux/module.h>
25#include <linux/platform_device.h> 27#include <linux/platform_device.h>
@@ -525,8 +527,7 @@ static int ks8842_open(struct net_device *netdev)
525 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME, 527 err = request_irq(adapter->irq, ks8842_irq, IRQF_SHARED, DRV_NAME,
526 adapter); 528 adapter);
527 if (err) { 529 if (err) {
528 printk(KERN_ERR "Failed to request IRQ: %d: %d\n", 530 pr_err("Failed to request IRQ: %d: %d\n", adapter->irq, err);
529 adapter->irq, err);
530 return err; 531 return err;
531 } 532 }
532 533
@@ -668,8 +669,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev)
668 669
669 platform_set_drvdata(pdev, netdev); 670 platform_set_drvdata(pdev, netdev);
670 671
671 printk(KERN_INFO DRV_NAME 672 pr_info("Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
672 " Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
673 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7); 673 (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
674 674
675 return 0; 675 return 0;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 13cc1ca261d9..4dcd61f81ec2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -9,6 +9,8 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#define DEBUG 14#define DEBUG
13 15
14#include <linux/module.h> 16#include <linux/module.h>
@@ -125,11 +127,6 @@ struct ks8851_net {
125 127
126static int msg_enable; 128static int msg_enable;
127 129
128#define ks_info(_ks, _msg...) dev_info(&(_ks)->spidev->dev, _msg)
129#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->spidev->dev, _msg)
130#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->spidev->dev, _msg)
131#define ks_err(_ks, _msg...) dev_err(&(_ks)->spidev->dev, _msg)
132
133/* shift for byte-enable data */ 130/* shift for byte-enable data */
134#define BYTE_EN(_x) ((_x) << 2) 131#define BYTE_EN(_x) ((_x) << 2)
135 132
@@ -167,7 +164,7 @@ static void ks8851_wrreg16(struct ks8851_net *ks, unsigned reg, unsigned val)
167 164
168 ret = spi_sync(ks->spidev, msg); 165 ret = spi_sync(ks->spidev, msg);
169 if (ret < 0) 166 if (ret < 0)
170 ks_err(ks, "spi_sync() failed\n"); 167 netdev_err(ks->netdev, "spi_sync() failed\n");
171} 168}
172 169
173/** 170/**
@@ -197,7 +194,7 @@ static void ks8851_wrreg8(struct ks8851_net *ks, unsigned reg, unsigned val)
197 194
198 ret = spi_sync(ks->spidev, msg); 195 ret = spi_sync(ks->spidev, msg);
199 if (ret < 0) 196 if (ret < 0)
200 ks_err(ks, "spi_sync() failed\n"); 197 netdev_err(ks->netdev, "spi_sync() failed\n");
201} 198}
202 199
203/** 200/**
@@ -263,7 +260,7 @@ static void ks8851_rdreg(struct ks8851_net *ks, unsigned op,
263 260
264 ret = spi_sync(ks->spidev, msg); 261 ret = spi_sync(ks->spidev, msg);
265 if (ret < 0) 262 if (ret < 0)
266 ks_err(ks, "read: spi_sync() failed\n"); 263 netdev_err(ks->netdev, "read: spi_sync() failed\n");
267 else if (ks8851_rx_1msg(ks)) 264 else if (ks8851_rx_1msg(ks))
268 memcpy(rxb, trx + 2, rxl); 265 memcpy(rxb, trx + 2, rxl);
269 else 266 else
@@ -417,8 +414,8 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
417 u8 txb[1]; 414 u8 txb[1];
418 int ret; 415 int ret;
419 416
420 if (netif_msg_rx_status(ks)) 417 netif_dbg(ks, rx_status, ks->netdev,
421 ks_dbg(ks, "%s: %d@%p\n", __func__, len, buff); 418 "%s: %d@%p\n", __func__, len, buff);
422 419
423 /* set the operation we're issuing */ 420 /* set the operation we're issuing */
424 txb[0] = KS_SPIOP_RXFIFO; 421 txb[0] = KS_SPIOP_RXFIFO;
@@ -434,7 +431,7 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
434 431
435 ret = spi_sync(ks->spidev, msg); 432 ret = spi_sync(ks->spidev, msg);
436 if (ret < 0) 433 if (ret < 0)
437 ks_err(ks, "%s: spi_sync() failed\n", __func__); 434 netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
438} 435}
439 436
440/** 437/**
@@ -446,10 +443,11 @@ static void ks8851_rdfifo(struct ks8851_net *ks, u8 *buff, unsigned len)
446*/ 443*/
447static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt) 444static void ks8851_dbg_dumpkkt(struct ks8851_net *ks, u8 *rxpkt)
448{ 445{
449 ks_dbg(ks, "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", 446 netdev_dbg(ks->netdev,
450 rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7], 447 "pkt %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
451 rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11], 448 rxpkt[4], rxpkt[5], rxpkt[6], rxpkt[7],
452 rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]); 449 rxpkt[8], rxpkt[9], rxpkt[10], rxpkt[11],
450 rxpkt[12], rxpkt[13], rxpkt[14], rxpkt[15]);
453} 451}
454 452
455/** 453/**
@@ -471,8 +469,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
471 469
472 rxfc = ks8851_rdreg8(ks, KS_RXFC); 470 rxfc = ks8851_rdreg8(ks, KS_RXFC);
473 471
474 if (netif_msg_rx_status(ks)) 472 netif_dbg(ks, rx_status, ks->netdev,
475 ks_dbg(ks, "%s: %d packets\n", __func__, rxfc); 473 "%s: %d packets\n", __func__, rxfc);
476 474
477 /* Currently we're issuing a read per packet, but we could possibly 475 /* Currently we're issuing a read per packet, but we could possibly
478 * improve the code by issuing a single read, getting the receive 476 * improve the code by issuing a single read, getting the receive
@@ -489,9 +487,8 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
489 rxstat = rxh & 0xffff; 487 rxstat = rxh & 0xffff;
490 rxlen = rxh >> 16; 488 rxlen = rxh >> 16;
491 489
492 if (netif_msg_rx_status(ks)) 490 netif_dbg(ks, rx_status, ks->netdev,
493 ks_dbg(ks, "rx: stat 0x%04x, len 0x%04x\n", 491 "rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
494 rxstat, rxlen);
495 492
496 /* the length of the packet includes the 32bit CRC */ 493 /* the length of the packet includes the 32bit CRC */
497 494
@@ -553,9 +550,8 @@ static void ks8851_irq_work(struct work_struct *work)
553 550
554 status = ks8851_rdreg16(ks, KS_ISR); 551 status = ks8851_rdreg16(ks, KS_ISR);
555 552
556 if (netif_msg_intr(ks)) 553 netif_dbg(ks, intr, ks->netdev,
557 dev_dbg(&ks->spidev->dev, "%s: status 0x%04x\n", 554 "%s: status 0x%04x\n", __func__, status);
558 __func__, status);
559 555
560 if (status & IRQ_LCI) { 556 if (status & IRQ_LCI) {
561 /* should do something about checking link status */ 557 /* should do something about checking link status */
@@ -582,8 +578,8 @@ static void ks8851_irq_work(struct work_struct *work)
582 * system */ 578 * system */
583 ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR); 579 ks->tx_space = ks8851_rdreg16(ks, KS_TXMIR);
584 580
585 if (netif_msg_intr(ks)) 581 netif_dbg(ks, intr, ks->netdev,
586 ks_dbg(ks, "%s: txspace %d\n", __func__, ks->tx_space); 582 "%s: txspace %d\n", __func__, ks->tx_space);
587 } 583 }
588 584
589 if (status & IRQ_RXI) 585 if (status & IRQ_RXI)
@@ -659,9 +655,8 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
659 unsigned fid = 0; 655 unsigned fid = 0;
660 int ret; 656 int ret;
661 657
662 if (netif_msg_tx_queued(ks)) 658 netif_dbg(ks, tx_queued, ks->netdev, "%s: skb %p, %d@%p, irq %d\n",
663 dev_dbg(&ks->spidev->dev, "%s: skb %p, %d@%p, irq %d\n", 659 __func__, txp, txp->len, txp->data, irq);
664 __func__, txp, txp->len, txp->data, irq);
665 660
666 fid = ks->fid++; 661 fid = ks->fid++;
667 fid &= TXFR_TXFID_MASK; 662 fid &= TXFR_TXFID_MASK;
@@ -685,7 +680,7 @@ static void ks8851_wrpkt(struct ks8851_net *ks, struct sk_buff *txp, bool irq)
685 680
686 ret = spi_sync(ks->spidev, msg); 681 ret = spi_sync(ks->spidev, msg);
687 if (ret < 0) 682 if (ret < 0)
688 ks_err(ks, "%s: spi_sync() failed\n", __func__); 683 netdev_err(ks->netdev, "%s: spi_sync() failed\n", __func__);
689} 684}
690 685
691/** 686/**
@@ -744,8 +739,7 @@ static void ks8851_set_powermode(struct ks8851_net *ks, unsigned pwrmode)
744{ 739{
745 unsigned pmecr; 740 unsigned pmecr;
746 741
747 if (netif_msg_hw(ks)) 742 netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
748 ks_dbg(ks, "setting power mode %d\n", pwrmode);
749 743
750 pmecr = ks8851_rdreg16(ks, KS_PMECR); 744 pmecr = ks8851_rdreg16(ks, KS_PMECR);
751 pmecr &= ~PMECR_PM_MASK; 745 pmecr &= ~PMECR_PM_MASK;
@@ -769,8 +763,7 @@ static int ks8851_net_open(struct net_device *dev)
769 * else at the moment */ 763 * else at the moment */
770 mutex_lock(&ks->lock); 764 mutex_lock(&ks->lock);
771 765
772 if (netif_msg_ifup(ks)) 766 netif_dbg(ks, ifup, ks->netdev, "opening\n");
773 ks_dbg(ks, "opening %s\n", dev->name);
774 767
775 /* bring chip out of any power saving mode it was in */ 768 /* bring chip out of any power saving mode it was in */
776 ks8851_set_powermode(ks, PMECR_PM_NORMAL); 769 ks8851_set_powermode(ks, PMECR_PM_NORMAL);
@@ -826,8 +819,7 @@ static int ks8851_net_open(struct net_device *dev)
826 819
827 netif_start_queue(ks->netdev); 820 netif_start_queue(ks->netdev);
828 821
829 if (netif_msg_ifup(ks)) 822 netif_dbg(ks, ifup, ks->netdev, "network device up\n");
830 ks_dbg(ks, "network device %s up\n", dev->name);
831 823
832 mutex_unlock(&ks->lock); 824 mutex_unlock(&ks->lock);
833 return 0; 825 return 0;
@@ -845,8 +837,7 @@ static int ks8851_net_stop(struct net_device *dev)
845{ 837{
846 struct ks8851_net *ks = netdev_priv(dev); 838 struct ks8851_net *ks = netdev_priv(dev);
847 839
848 if (netif_msg_ifdown(ks)) 840 netif_info(ks, ifdown, dev, "shutting down\n");
849 ks_info(ks, "%s: shutting down\n", dev->name);
850 841
851 netif_stop_queue(dev); 842 netif_stop_queue(dev);
852 843
@@ -874,8 +865,8 @@ static int ks8851_net_stop(struct net_device *dev)
874 while (!skb_queue_empty(&ks->txq)) { 865 while (!skb_queue_empty(&ks->txq)) {
875 struct sk_buff *txb = skb_dequeue(&ks->txq); 866 struct sk_buff *txb = skb_dequeue(&ks->txq);
876 867
877 if (netif_msg_ifdown(ks)) 868 netif_dbg(ks, ifdown, ks->netdev,
878 ks_dbg(ks, "%s: freeing txb %p\n", __func__, txb); 869 "%s: freeing txb %p\n", __func__, txb);
879 870
880 dev_kfree_skb(txb); 871 dev_kfree_skb(txb);
881 } 872 }
@@ -904,9 +895,8 @@ static netdev_tx_t ks8851_start_xmit(struct sk_buff *skb,
904 unsigned needed = calc_txlen(skb->len); 895 unsigned needed = calc_txlen(skb->len);
905 netdev_tx_t ret = NETDEV_TX_OK; 896 netdev_tx_t ret = NETDEV_TX_OK;
906 897
907 if (netif_msg_tx_queued(ks)) 898 netif_dbg(ks, tx_queued, ks->netdev,
908 ks_dbg(ks, "%s: skb %p, %d@%p\n", __func__, 899 "%s: skb %p, %d@%p\n", __func__, skb, skb->len, skb->data);
909 skb, skb->len, skb->data);
910 900
911 spin_lock(&ks->statelock); 901 spin_lock(&ks->statelock);
912 902
@@ -966,13 +956,13 @@ static void ks8851_set_rx_mode(struct net_device *dev)
966 rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE | 956 rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
967 RXCR1_RXPAFMA | RXCR1_RXMAFMA); 957 RXCR1_RXPAFMA | RXCR1_RXMAFMA);
968 } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) { 958 } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
969 struct dev_mc_list *mcptr; 959 struct netdev_hw_addr *ha;
970 u32 crc; 960 u32 crc;
971 961
972 /* accept some multicast */ 962 /* accept some multicast */
973 963
974 netdev_for_each_mc_addr(mcptr, dev) { 964 netdev_for_each_mc_addr(ha, dev) {
975 crc = ether_crc(ETH_ALEN, mcptr->dmi_addr); 965 crc = ether_crc(ETH_ALEN, ha->addr);
976 crc >>= (32 - 6); /* get top six bits */ 966 crc >>= (32 - 6); /* get top six bits */
977 967
978 rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf)); 968 rxctrl.mchash[crc >> 4] |= (1 << (crc & 0xf));
@@ -1185,17 +1175,17 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
1185 rd = ks8851_rdreg16(ks, KS_MBIR); 1175 rd = ks8851_rdreg16(ks, KS_MBIR);
1186 1176
1187 if ((rd & both_done) != both_done) { 1177 if ((rd & both_done) != both_done) {
1188 ks_warn(ks, "Memory selftest not finished\n"); 1178 netdev_warn(ks->netdev, "Memory selftest not finished\n");
1189 return 0; 1179 return 0;
1190 } 1180 }
1191 1181
1192 if (rd & MBIR_TXMBFA) { 1182 if (rd & MBIR_TXMBFA) {
1193 ks_err(ks, "TX memory selftest fail\n"); 1183 netdev_err(ks->netdev, "TX memory selftest fail\n");
1194 ret |= 1; 1184 ret |= 1;
1195 } 1185 }
1196 1186
1197 if (rd & MBIR_RXMBFA) { 1187 if (rd & MBIR_RXMBFA) {
1198 ks_err(ks, "RX memory selftest fail\n"); 1188 netdev_err(ks->netdev, "RX memory selftest fail\n");
1199 ret |= 2; 1189 ret |= 2;
1200 } 1190 }
1201 1191
@@ -1293,9 +1283,9 @@ static int __devinit ks8851_probe(struct spi_device *spi)
1293 goto err_netdev; 1283 goto err_netdev;
1294 } 1284 }
1295 1285
1296 dev_info(&spi->dev, "revision %d, MAC %pM, IRQ %d\n", 1286 netdev_info(ndev, "revision %d, MAC %pM, IRQ %d\n",
1297 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)), 1287 CIDER_REV_GET(ks8851_rdreg16(ks, KS_CIDER)),
1298 ndev->dev_addr, ndev->irq); 1288 ndev->dev_addr, ndev->irq);
1299 1289
1300 return 0; 1290 return 0;
1301 1291
@@ -1314,7 +1304,7 @@ static int __devexit ks8851_remove(struct spi_device *spi)
1314 struct ks8851_net *priv = dev_get_drvdata(&spi->dev); 1304 struct ks8851_net *priv = dev_get_drvdata(&spi->dev);
1315 1305
1316 if (netif_msg_drv(priv)) 1306 if (netif_msg_drv(priv))
1317 dev_info(&spi->dev, "remove"); 1307 dev_info(&spi->dev, "remove\n");
1318 1308
1319 unregister_netdev(priv->netdev); 1309 unregister_netdev(priv->netdev);
1320 free_irq(spi->irq, priv); 1310 free_irq(spi->irq, priv);
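
A simplified sketch (illustration, not part of this patch) of what the netif_* helpers used above expand to: they fold the msg_enable test that the removed ks_dbg()/ks_err() wrappers left at each call site into the print itself (simplified from <linux/netdevice.h>):

	#define example_netif_dbg(priv, type, netdev, fmt, args...)	\
	do {								\
		if (netif_msg_##type(priv))				\
			netdev_dbg(netdev, fmt, ##args);		\
	} while (0)

	/* usage, equivalent to the converted call sites above:
	 *   netif_dbg(ks, intr, ks->netdev, "%s: status 0x%04x\n", __func__, status);
	 */
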
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index 6354ab3a45a6..2e2c69b24062 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -21,6 +21,8 @@
21 * KS8851 16bit MLL chip from Micrel Inc. 21 * KS8851 16bit MLL chip from Micrel Inc.
22 */ 22 */
23 23
24#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
25
24#include <linux/module.h> 26#include <linux/module.h>
25#include <linux/kernel.h> 27#include <linux/kernel.h>
26#include <linux/netdevice.h> 28#include <linux/netdevice.h>
@@ -361,7 +363,6 @@ static u8 KS_DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x86, 0x95, 0x11 };
361 363
362#define MAX_MCAST_LST 32 364#define MAX_MCAST_LST 32
363#define HW_MCAST_SIZE 8 365#define HW_MCAST_SIZE 8
364#define MAC_ADDR_LEN 6
365 366
366/** 367/**
367 * union ks_tx_hdr - tx header data 368 * union ks_tx_hdr - tx header data
@@ -449,7 +450,7 @@ struct ks_net {
449 u16 promiscuous; 450 u16 promiscuous;
450 u16 all_mcast; 451 u16 all_mcast;
451 u16 mcast_lst_size; 452 u16 mcast_lst_size;
452 u8 mcast_lst[MAX_MCAST_LST][MAC_ADDR_LEN]; 453 u8 mcast_lst[MAX_MCAST_LST][ETH_ALEN];
453 u8 mcast_bits[HW_MCAST_SIZE]; 454 u8 mcast_bits[HW_MCAST_SIZE];
454 u8 mac_addr[6]; 455 u8 mac_addr[6];
455 u8 fid; 456 u8 fid;
@@ -459,11 +460,6 @@ struct ks_net {
459 460
460static int msg_enable; 461static int msg_enable;
461 462
462#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
463#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
464#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
465#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
466
467#define BE3 0x8000 /* Byte Enable 3 */ 463#define BE3 0x8000 /* Byte Enable 3 */
468#define BE2 0x4000 /* Byte Enable 2 */ 464#define BE2 0x4000 /* Byte Enable 2 */
469#define BE1 0x2000 /* Byte Enable 1 */ 465#define BE1 0x2000 /* Byte Enable 1 */
@@ -625,8 +621,7 @@ static void ks_set_powermode(struct ks_net *ks, unsigned pwrmode)
 {
 	unsigned pmecr;
 
-	if (netif_msg_hw(ks))
-		ks_dbg(ks, "setting power mode %d\n", pwrmode);
+	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", pwrmode);
 
 	ks_rdreg16(ks, KS_GRR);
 	pmecr = ks_rdreg16(ks, KS_PMECR);
@@ -806,11 +801,10 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
 		/* read data block including CRC 4 bytes */
 		ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
 		skb_put(skb, frame_hdr->len);
-		skb->dev = netdev;
 		skb->protocol = eth_type_trans(skb, netdev);
 		netif_rx(skb);
 	} else {
-		printk(KERN_ERR "%s: err:skb alloc\n", __func__);
+		pr_err("%s: err:skb alloc\n", __func__);
 		ks_wrreg16(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_RRXEF));
 		if (skb)
 			dev_kfree_skb_irq(skb);
@@ -837,9 +831,8 @@ static void ks_update_link_status(struct net_device *netdev, struct ks_net *ks)
 		netif_carrier_off(netdev);
 		link_up_status = false;
 	}
-	if (netif_msg_link(ks))
-		ks_dbg(ks, "%s: %s\n",
-			__func__, link_up_status ? "UP" : "DOWN");
+	netif_dbg(ks, link, ks->netdev,
+		  "%s: %s\n", __func__, link_up_status ? "UP" : "DOWN");
 }
 
 /**
@@ -909,15 +902,13 @@ static int ks_net_open(struct net_device *netdev)
 	 * else at the moment.
 	 */
 
-	if (netif_msg_ifup(ks))
-		ks_dbg(ks, "%s - entry\n", __func__);
+	netif_dbg(ks, ifup, ks->netdev, "%s - entry\n", __func__);
 
 	/* reset the HW */
 	err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
 
 	if (err) {
-		printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
-			ks->irq, err);
+		pr_err("Failed to request IRQ: %d: %d\n", ks->irq, err);
 		return err;
 	}
 
@@ -930,8 +921,7 @@ static int ks_net_open(struct net_device *netdev)
 	ks_enable_qmu(ks);
 	netif_start_queue(ks->netdev);
 
-	if (netif_msg_ifup(ks))
-		ks_dbg(ks, "network device %s up\n", netdev->name);
+	netif_dbg(ks, ifup, ks->netdev, "network device up\n");
 
 	return 0;
 }
@@ -948,8 +938,7 @@ static int ks_net_stop(struct net_device *netdev)
 {
 	struct ks_net *ks = netdev_priv(netdev);
 
-	if (netif_msg_ifdown(ks))
-		ks_info(ks, "%s: shutting down\n", netdev->name);
+	netif_info(ks, ifdown, netdev, "shutting down\n");
 
 	netif_stop_queue(netdev);
 
@@ -1181,7 +1170,7 @@ static void ks_set_mcast(struct ks_net *ks, u16 mcast)
 static void ks_set_rx_mode(struct net_device *netdev)
 {
 	struct ks_net *ks = netdev_priv(netdev);
-	struct dev_mc_list *ptr;
+	struct netdev_hw_addr *ha;
 
 	/* Turn on/off promiscuous mode. */
 	if ((netdev->flags & IFF_PROMISC) == IFF_PROMISC)
@@ -1198,13 +1187,12 @@ static void ks_set_rx_mode(struct net_device *netdev)
 	if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
 		int i = 0;
 
-		netdev_for_each_mc_addr(ptr, netdev) {
-			if (!(*ptr->dmi_addr & 1))
+		netdev_for_each_mc_addr(ha, netdev) {
+			if (!(*ha->addr & 1))
 				continue;
 			if (i >= MAX_MCAST_LST)
 				break;
-			memcpy(ks->mcast_lst[i++], ptr->dmi_addr,
-				MAC_ADDR_LEN);
+			memcpy(ks->mcast_lst[i++], ha->addr, ETH_ALEN);
 		}
 		ks->mcast_lst_size = (u8)i;
 		ks_set_grpaddr(ks);
@@ -1430,21 +1418,21 @@ static int ks_read_selftest(struct ks_net *ks)
 	rd = ks_rdreg16(ks, KS_MBIR);
 
 	if ((rd & both_done) != both_done) {
-		ks_warn(ks, "Memory selftest not finished\n");
+		netdev_warn(ks->netdev, "Memory selftest not finished\n");
 		return 0;
 	}
 
 	if (rd & MBIR_TXMBFA) {
-		ks_err(ks, "TX memory selftest fails\n");
+		netdev_err(ks->netdev, "TX memory selftest fails\n");
 		ret |= 1;
 	}
 
 	if (rd & MBIR_RXMBFA) {
-		ks_err(ks, "RX memory selftest fails\n");
+		netdev_err(ks->netdev, "RX memory selftest fails\n");
 		ret |= 2;
 	}
 
-	ks_info(ks, "the selftest passes\n");
+	netdev_info(ks->netdev, "the selftest passes\n");
 	return ret;
 }
 
@@ -1515,7 +1503,7 @@ static int ks_hw_init(struct ks_net *ks)
 	ks->frame_head_info = (struct type_frame_head *) \
 		kmalloc(MHEADER_SIZE, GFP_KERNEL);
 	if (!ks->frame_head_info) {
-		printk(KERN_ERR "Error: Fail to allocate frame memory\n");
+		pr_err("Error: Fail to allocate frame memory\n");
 		return false;
 	}
 
@@ -1581,7 +1569,7 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
 	ks->mii.mdio_read = ks_phy_read;
 	ks->mii.mdio_write = ks_phy_write;
 
-	ks_info(ks, "message enable is %d\n", msg_enable);
+	netdev_info(netdev, "message enable is %d\n", msg_enable);
 	/* set the default message enable */
 	ks->msg_enable = netif_msg_init(msg_enable, (NETIF_MSG_DRV |
 						     NETIF_MSG_PROBE |
@@ -1590,13 +1578,13 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
 
 	/* simple check for a valid chip being connected to the bus */
 	if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
-		ks_err(ks, "failed to read device ID\n");
+		netdev_err(netdev, "failed to read device ID\n");
 		err = -ENODEV;
 		goto err_register;
 	}
 
 	if (ks_read_selftest(ks)) {
-		ks_err(ks, "failed to read device ID\n");
+		netdev_err(netdev, "failed to read device ID\n");
 		err = -ENODEV;
 		goto err_register;
 	}
@@ -1627,9 +1615,8 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
 
 	id = ks_rdreg16(ks, KS_CIDER);
 
-	printk(KERN_INFO DRV_NAME
-		" Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
-		(id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
+	netdev_info(netdev, "Found chip, family: 0x%x, id: 0x%x, rev: 0x%x\n",
+		    (id >> 8) & 0xff, (id >> 4) & 0xf, (id >> 1) & 0x7);
 	return 0;
 
 err_register:
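
Two logging idioms drive most of the ks8851_mll.c churn: a pr_fmt() definition placed before the includes, so every pr_err()/pr_info() in the file is prefixed with the module name, and netif_dbg()/netif_info(), which collapse the old two-line "if (netif_msg_*(ks)) ks_dbg(...)" pairs into one call gated on msg_enable. A sketch under those assumptions (struct mydrv is hypothetical; the netif_*() macros only require a msg_enable field):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/netdevice.h>

struct mydrv {
	struct net_device *netdev;
	u32 msg_enable;			/* consulted by the netif_*() macros */
};

static void mydrv_set_power(struct mydrv *ks, int mode)
{
	/* emitted only when NETIF_MSG_HW is set in ks->msg_enable */
	netif_dbg(ks, hw, ks->netdev, "setting power mode %d\n", mode);
	/* pr_fmt() above turns this into "<module>: power request %d" */
	pr_info("power request %d\n", mode);
}
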
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 0606a1f359fb..cc0bc8a26085 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -14,10 +14,11 @@
  * GNU General Public License for more details.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/version.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
 #include <linux/proc_fs.h>
@@ -1484,11 +1485,6 @@ struct dev_priv {
 	int promiscuous;
 };
 
-#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
-#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
-#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
-#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
-
 #define DRV_NAME		"KSZ884X PCI"
 #define DEVICE_NAME		"KSZ884x PCI"
 #define DRV_VERSION		"1.0.0"
@@ -3835,7 +3831,7 @@ static void ksz_check_desc_num(struct ksz_desc_info *info)
 		alloc >>= 1;
 	}
 	if (alloc != 1 || shift < MIN_DESC_SHIFT) {
-		printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
+		pr_alert("Hardware descriptor numbers not right!\n");
 		while (alloc) {
 			shift++;
 			alloc >>= 1;
@@ -4546,8 +4542,7 @@ static int ksz_alloc_mem(struct dev_info *adapter)
 		(((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
 		DESC_ALIGNMENT) * DESC_ALIGNMENT);
 	if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
-		printk(KERN_ALERT
-			"Hardware descriptor size not right!\n");
+		pr_alert("Hardware descriptor size not right!\n");
 	ksz_check_desc_num(&hw->rx_desc_info);
 	ksz_check_desc_num(&hw->tx_desc_info);
 
@@ -4689,7 +4684,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
 	int frag;
 	skb_frag_t *this_frag;
 
-	dma_buf->len = skb->len - skb->data_len;
+	dma_buf->len = skb_headlen(skb);
 
 	dma_buf->dma = pci_map_single(
 		hw_priv->pdev, skb->data, dma_buf->len,
@@ -5049,8 +5044,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
 			dma_buf->skb->data, packet_len);
 	} while (0);
 
-	skb->dev = dev;
-
 	skb->protocol = eth_type_trans(skb, dev);
 
 	if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
@@ -5061,8 +5054,6 @@ static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
 	priv->stats.rx_bytes += packet_len;
 
 	/* Notify upper layer for received packet. */
-	dev->last_rx = jiffies;
-
 	rx_status = netif_rx(skb);
 
 	return 0;
@@ -5320,10 +5311,10 @@ static irqreturn_t netdev_intr(int irq, void *dev_id)
 			u32 data;
 
 			hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
-			printk(KERN_INFO "Tx stopped\n");
+			pr_info("Tx stopped\n");
 			data = readl(hw->io + KS_DMA_TX_CTRL);
 			if (!(data & DMA_TX_ENABLE))
-				printk(KERN_INFO "Tx disabled\n");
+				pr_info("Tx disabled\n");
 			break;
 		}
 	} while (0);
@@ -5496,6 +5487,18 @@ static int prepare_hardware(struct net_device *dev)
 	return 0;
 }
 
+static void set_media_state(struct net_device *dev, int media_state)
+{
+	struct dev_priv *priv = netdev_priv(dev);
+
+	if (media_state == priv->media_state)
+		netif_carrier_on(dev);
+	else
+		netif_carrier_off(dev);
+	netif_info(priv, link, dev, "link %s\n",
+		   media_state == priv->media_state ? "on" : "off");
+}
+
 /**
  * netdev_open - open network device
  * @dev:	Network device.
@@ -5585,15 +5588,7 @@ static int netdev_open(struct net_device *dev)
 
 	priv->media_state = port->linked->state;
 
-	if (media_connected == priv->media_state)
-		netif_carrier_on(dev);
-	else
-		netif_carrier_off(dev);
-	if (netif_msg_link(priv))
-		printk(KERN_INFO "%s link %s\n", dev->name,
-			(media_connected == priv->media_state ?
-			"on" : "off"));
-
+	set_media_state(dev, media_connected);
 	netif_start_queue(dev);
 
 	return 0;
@@ -5767,7 +5762,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
 	struct dev_priv *priv = netdev_priv(dev);
 	struct dev_info *hw_priv = priv->adapter;
 	struct ksz_hw *hw = &hw_priv->hw;
-	struct dev_mc_list *mc_ptr;
+	struct netdev_hw_addr *ha;
 	int multicast = (dev->flags & IFF_ALLMULTI);
 
 	dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
@@ -5784,7 +5779,7 @@ static void netdev_set_rx_mode(struct net_device *dev)
 		int i = 0;
 
 		/* List too big to support so turn on all multicast mode. */
-		if (dev->mc_count > MAX_MULTICAST_LIST) {
+		if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
 			if (MAX_MULTICAST_LIST != hw->multi_list_size) {
 				hw->multi_list_size = MAX_MULTICAST_LIST;
 				++hw->all_multi;
@@ -5793,13 +5788,12 @@ static void netdev_set_rx_mode(struct net_device *dev)
 			return;
 		}
 
-		netdev_for_each_mc_addr(mc_ptr, dev) {
-			if (!(*mc_ptr->dmi_addr & 1))
+		netdev_for_each_mc_addr(ha, dev) {
+			if (!(*ha->addr & 1))
 				continue;
 			if (i >= MAX_MULTICAST_LIST)
 				break;
-			memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
-				MAC_ADDR_LEN);
+			memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
 		}
 		hw->multi_list_size = (u8) i;
 		hw_set_grp_addr(hw);
@@ -6683,16 +6677,8 @@ static void update_link(struct net_device *dev, struct dev_priv *priv,
 {
 	if (priv->media_state != port->linked->state) {
 		priv->media_state = port->linked->state;
-		if (netif_running(dev)) {
-			if (media_connected == priv->media_state)
-				netif_carrier_on(dev);
-			else
-				netif_carrier_off(dev);
-			if (netif_msg_link(priv))
-				printk(KERN_INFO "%s link %s\n", dev->name,
-					(media_connected == priv->media_state ?
-					"on" : "off"));
-		}
+		if (netif_running(dev))
+			set_media_state(dev, media_connected);
 	}
 }
 
@@ -6986,7 +6972,7 @@ static int __init pcidev_init(struct pci_dev *pdev,
 	int pi;
 	int port_count;
 	int result;
-	char banner[80];
+	char banner[sizeof(version)];
 	struct ksz_switch *sw = NULL;
 
 	result = pci_enable_device(pdev);
@@ -7010,10 +6996,9 @@ static int __init pcidev_init(struct pci_dev *pdev,
 
 	result = -ENOMEM;
 
-	info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
+	info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
 	if (!info)
 		goto pcidev_init_dev_err;
-	memset(info, 0, sizeof(struct platform_info));
 
 	hw_priv = &info->dev_info;
 	hw_priv->pdev = pdev;
@@ -7027,15 +7012,15 @@ static int __init pcidev_init(struct pci_dev *pdev,
 	cnt = hw_init(hw);
 	if (!cnt) {
 		if (msg_enable & NETIF_MSG_PROBE)
-			printk(KERN_ALERT "chip not detected\n");
+			pr_alert("chip not detected\n");
 		result = -ENODEV;
 		goto pcidev_init_alloc_err;
 	}
 
-	sprintf(banner, "%s\n", version);
-	banner[13] = cnt + '0';
-	ks_info(hw_priv, "%s", banner);
-	ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
+	snprintf(banner, sizeof(banner), "%s", version);
+	banner[13] = cnt + '0';		/* Replace x in "Micrel KSZ884x" */
+	dev_info(&hw_priv->pdev->dev, "%s\n", banner);
+	dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
 
 	/* Assume device is KSZ8841. */
 	hw->dev_count = 1;
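
Two small hardening idioms recur in the ksz884x.c hunks: kzalloc() replacing a kmalloc()/memset() pair, and the banner buffer sized from the very string it copies, filled with a bounded snprintf() instead of sprintf(). A reduced sketch of both (the version string here is a stand-in, not the driver's):

#include <linux/kernel.h>
#include <linux/slab.h>

static const char version[] = "example driver v1.0.0";

static int example_init(void)
{
	char banner[sizeof(version)];	/* always large enough */
	void *info;

	info = kzalloc(128, GFP_KERNEL);	/* allocate and zero in one call */
	if (!info)
		return -ENOMEM;

	snprintf(banner, sizeof(banner), "%s", version);	/* cannot overrun */
	kfree(info);
	return 0;
}
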
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 973390b82ec2..61c38ab4c247 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1388,7 +1388,7 @@ static void set_multicast_list(struct net_device *dev)
 	}
 
 	if (!netdev_mc_empty(dev)) {
-		struct dev_mc_list *dmi;
+		struct netdev_hw_addr *ha;
 		unsigned char *cp;
 		struct mc_cmd *cmd;
 
@@ -1396,10 +1396,10 @@ static void set_multicast_list(struct net_device *dev)
 		cmd->cmd.command = SWAP16(CmdMulticastList);
 		cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
 		cp = cmd->mc_addrs;
-		netdev_for_each_mc_addr(dmi, dev) {
+		netdev_for_each_mc_addr(ha, dev) {
 			if (!cnt--)
 				break;
-			memcpy(cp, dmi->dmi_addr, 6);
+			memcpy(cp, ha->addr, 6);
 			if (i596_debug > 1)
 				DEB(DEB_MULTI,
 				    printk(KERN_DEBUG
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 56f66f485400..526dc9cbc3c6 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -905,10 +905,10 @@ static struct net_device_stats *__ei_get_stats(struct net_device *dev)
 
 static inline void make_mc_bits(u8 *bits, struct net_device *dev)
 {
-	struct dev_mc_list *dmi;
+	struct netdev_hw_addr *ha;
 
-	netdev_for_each_mc_addr(dmi, dev) {
-		u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+	netdev_for_each_mc_addr(ha, dev) {
+		u32 crc = ether_crc(ETH_ALEN, ha->addr);
 		/*
 		 * The 8390 uses the 6 most significant bits of the
 		 * CRC to index the multicast table.
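
The comment kept in the lib8390.c hunk states the filtering scheme: the 8390 hashes each multicast address with the Ethernet CRC and uses the top 6 bits to select one of 64 bits in an 8-byte table. A sketch of that index computation, assuming the stock ether_crc() helper:

#include <linux/crc32.h>
#include <linux/etherdevice.h>

static void set_mc_bit(u8 *bits, const u8 *addr)
{
	u32 crc = ether_crc(ETH_ALEN, addr);
	int bit = crc >> 26;	/* 6 MSBs: 0..63 */

	/* top 3 bits pick the byte, next 3 the bit within it */
	bits[bit >> 3] |= 1 << (bit & 7);
}
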
diff --git a/drivers/net/ll_temac.h b/drivers/net/ll_temac.h
index 1af66a1e6911..c03358434acb 100644
--- a/drivers/net/ll_temac.h
+++ b/drivers/net/ll_temac.h
@@ -5,8 +5,11 @@
 #include <linux/netdevice.h>
 #include <linux/of.h>
 #include <linux/spinlock.h>
+
+#ifdef CONFIG_PPC_DCR
 #include <asm/dcr.h>
 #include <asm/dcr-regs.h>
+#endif
 
 /* packet size info */
 #define XTE_HDR_SIZE			14      /* size of Ethernet header */
@@ -290,9 +293,6 @@ This option defaults to enabled (set) */
 
 #define TX_CONTROL_CALC_CSUM_MASK   1
 
-#define XTE_ALIGN       32
-#define BUFFER_ALIGN(adr) ((XTE_ALIGN - ((u32) adr)) % XTE_ALIGN)
-
 #define MULTICAST_CAM_TABLE_NUM 4
 
 /* TX/RX CURDESC_PTR points to first descriptor */
@@ -335,9 +335,15 @@ struct temac_local {
 	struct mii_bus *mii_bus;	/* MII bus reference */
 	int mdio_irqs[PHY_MAX_ADDR];	/* IRQs table for MDIO bus */
 
-	/* IO registers and IRQs */
+	/* IO registers, dma functions and IRQs */
 	void __iomem *regs;
+	void __iomem *sdma_regs;
+#ifdef CONFIG_PPC_DCR
 	dcr_host_t sdma_dcrs;
+#endif
+	u32 (*dma_in)(struct temac_local *, int);
+	void (*dma_out)(struct temac_local *, int, u32);
+
 	int tx_irq;
 	int rx_irq;
 	int emac_num;
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index ba617e3cf1bb..78c9a2e6e51e 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -20,9 +20,6 @@
  * or rx, so this should be okay.
  *
  * TODO:
- * - Fix driver to work on more than just Virtex5.  Right now the driver
- *   assumes that the locallink DMA registers are accessed via DCR
- *   instructions.
  * - Factor out locallink DMA code into separate driver
  * - Fix multicast assignment.
  * - Fix support for hardware checksumming.
@@ -116,17 +113,86 @@ void temac_indirect_out32(struct temac_local *lp, int reg, u32 value)
 	temac_iow(lp, XTE_CTL0_OFFSET, CNTLREG_WRITE_ENABLE_MASK | reg);
 }
 
+/**
+ * temac_dma_in32 - Memory mapped DMA read, this function expects a
+ * register input that is based on DCR word addresses which
+ * are then converted to memory mapped byte addresses
+ */
 static u32 temac_dma_in32(struct temac_local *lp, int reg)
 {
-	return dcr_read(lp->sdma_dcrs, reg);
+	return in_be32((u32 *)(lp->sdma_regs + (reg << 2)));
 }
 
+/**
+ * temac_dma_out32 - Memory mapped DMA read, this function expects a
+ * register input that is based on DCR word addresses which
+ * are then converted to memory mapped byte addresses
+ */
 static void temac_dma_out32(struct temac_local *lp, int reg, u32 value)
 {
+	out_be32((u32 *)(lp->sdma_regs + (reg << 2)), value);
+}
+
+/* DMA register access functions can be DCR based or memory mapped.
+ * The PowerPC 440 is DCR based, the PowerPC 405 and MicroBlaze are both
+ * memory mapped.
+ */
+#ifdef CONFIG_PPC_DCR
+
+/**
+ * temac_dma_dcr_in32 - DCR based DMA read
+ */
+static u32 temac_dma_dcr_in(struct temac_local *lp, int reg)
+{
+	return dcr_read(lp->sdma_dcrs, reg);
+}
+
+/**
+ * temac_dma_dcr_out32 - DCR based DMA write
+ */
+static void temac_dma_dcr_out(struct temac_local *lp, int reg, u32 value)
+{
 	dcr_write(lp->sdma_dcrs, reg, value);
 }
 
 /**
+ * temac_dcr_setup - If the DMA is DCR based, then setup the address and
+ * I/O functions
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+				struct device_node *np)
+{
+	unsigned int dcrs;
+
+	/* setup the dcr address mapping if it's in the device tree */
+
+	dcrs = dcr_resource_start(np, 0);
+	if (dcrs != 0) {
+		lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
+		lp->dma_in = temac_dma_dcr_in;
+		lp->dma_out = temac_dma_dcr_out;
+		dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
+		return 0;
+	}
+	/* no DCR in the device tree, indicate a failure */
+	return -1;
+}
+
+#else
+
+/*
+ * temac_dcr_setup - This is a stub for when DCR is not supported,
+ * such as with MicroBlaze
+ */
+static int temac_dcr_setup(struct temac_local *lp, struct of_device *op,
+				struct device_node *np)
+{
+	return -1;
+}
+
+#endif
+
+/**
  * temac_dma_bd_init - Setup buffer descriptor rings
  */
 static int temac_dma_bd_init(struct net_device *ndev)
@@ -156,14 +222,14 @@ static int temac_dma_bd_init(struct net_device *ndev)
 		lp->rx_bd_v[i].next = lp->rx_bd_p +
 				sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM);
 
-		skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE
-				+ XTE_ALIGN, GFP_ATOMIC);
+		skb = netdev_alloc_skb_ip_align(ndev,
+						XTE_MAX_JUMBO_FRAME_SIZE);
+
 		if (skb == 0) {
 			dev_err(&ndev->dev, "alloc_skb error %d\n", i);
 			return -1;
 		}
 		lp->rx_skb[i] = skb;
-		skb_reserve(skb, BUFFER_ALIGN(skb->data));
 		/* returns physical address of skb->data */
 		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
 						     skb->data,
@@ -173,23 +239,23 @@ static int temac_dma_bd_init(struct net_device *ndev)
 		lp->rx_bd_v[i].app0 = STS_CTRL_APP0_IRQONEND;
 	}
 
-	temac_dma_out32(lp, TX_CHNL_CTRL, 0x10220400 |
+	lp->dma_out(lp, TX_CHNL_CTRL, 0x10220400 |
 					  CHNL_CTRL_IRQ_EN |
 					  CHNL_CTRL_IRQ_DLY_EN |
 					  CHNL_CTRL_IRQ_COAL_EN);
 	/* 0x10220483 */
 	/* 0x00100483 */
-	temac_dma_out32(lp, RX_CHNL_CTRL, 0xff010000 |
+	lp->dma_out(lp, RX_CHNL_CTRL, 0xff010000 |
 					  CHNL_CTRL_IRQ_EN |
 					  CHNL_CTRL_IRQ_DLY_EN |
 					  CHNL_CTRL_IRQ_COAL_EN |
 					  CHNL_CTRL_IRQ_IOE);
 	/* 0xff010283 */
 
-	temac_dma_out32(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
-	temac_dma_out32(lp, RX_TAILDESC_PTR,
+	lp->dma_out(lp, RX_CURDESC_PTR,  lp->rx_bd_p);
+	lp->dma_out(lp, RX_TAILDESC_PTR,
 		       lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
-	temac_dma_out32(lp, TX_CURDESC_PTR, lp->tx_bd_p);
+	lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
 
 	return 0;
 }
@@ -251,20 +317,20 @@ static void temac_set_multicast_list(struct net_device *ndev)
 		temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
 		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
 	} else if (!netdev_mc_empty(ndev)) {
-		struct dev_mc_list *mclist;
+		struct netdev_hw_addr *ha;
 
 		i = 0;
-		netdev_for_each_mc_addr(mclist, ndev) {
+		netdev_for_each_mc_addr(ha, ndev) {
 			if (i >= MULTICAST_CAM_TABLE_NUM)
 				break;
-			multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
-					  (mclist->dmi_addr[2] << 16) |
-					  (mclist->dmi_addr[1] << 8) |
-					  (mclist->dmi_addr[0]));
+			multi_addr_msw = ((ha->addr[3] << 24) |
+					  (ha->addr[2] << 16) |
+					  (ha->addr[1] << 8) |
+					  (ha->addr[0]));
 			temac_indirect_out32(lp, XTE_MAW0_OFFSET,
 					     multi_addr_msw);
-			multi_addr_lsw = ((mclist->dmi_addr[5] << 8) |
-					  (mclist->dmi_addr[4]) | (i << 16));
+			multi_addr_lsw = ((ha->addr[5] << 8) |
+					  (ha->addr[4]) | (i << 16));
 			temac_indirect_out32(lp, XTE_MAW1_OFFSET,
 					     multi_addr_lsw);
 			i++;
@@ -427,9 +493,9 @@ static void temac_device_reset(struct net_device *ndev)
 	temac_indirect_out32(lp, XTE_RXC1_OFFSET, val & ~XTE_RXC1_RXEN_MASK);
 
 	/* Reset Local Link (DMA) */
-	temac_dma_out32(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
+	lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);
 	timeout = 1000;
-	while (temac_dma_in32(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
+	while (lp->dma_in(lp, DMA_CONTROL_REG) & DMA_CONTROL_RST) {
 		udelay(1);
 		if (--timeout == 0) {
 			dev_err(&ndev->dev,
@@ -437,7 +503,7 @@ static void temac_device_reset(struct net_device *ndev)
 			break;
 		}
 	}
-	temac_dma_out32(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
+	lp->dma_out(lp, DMA_CONTROL_REG, DMA_TAIL_ENABLE);
 
 	temac_dma_bd_init(ndev);
 
@@ -598,7 +664,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		lp->tx_bd_tail = 0;
 
 	/* Kick off the transfer */
-	temac_dma_out32(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
+	lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */
 
 	return NETDEV_TX_OK;
 }
@@ -612,7 +678,6 @@ static void ll_temac_recv(struct net_device *ndev)
 	struct cdmac_bd *cur_p;
 	dma_addr_t tail_p;
 	int length;
-	unsigned long skb_vaddr;
 	unsigned long flags;
 
 	spin_lock_irqsave(&lp->rx_lock, flags);
@@ -626,8 +691,7 @@ static void ll_temac_recv(struct net_device *ndev)
 		skb = lp->rx_skb[lp->rx_bd_ci];
 		length = cur_p->app4 & 0x3FFF;
 
-		skb_vaddr = virt_to_bus(skb->data);
-		dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
+		dma_unmap_single(ndev->dev.parent, cur_p->phys, length,
 				 DMA_FROM_DEVICE);
 
 		skb_put(skb, length);
@@ -640,16 +704,15 @@ static void ll_temac_recv(struct net_device *ndev)
 		ndev->stats.rx_packets++;
 		ndev->stats.rx_bytes += length;
 
-		new_skb = alloc_skb(XTE_MAX_JUMBO_FRAME_SIZE + XTE_ALIGN,
-				    GFP_ATOMIC);
+		new_skb = netdev_alloc_skb_ip_align(ndev,
+						XTE_MAX_JUMBO_FRAME_SIZE);
+
 		if (new_skb == 0) {
 			dev_err(&ndev->dev, "no memory for new sk_buff\n");
 			spin_unlock_irqrestore(&lp->rx_lock, flags);
 			return;
 		}
 
-		skb_reserve(new_skb, BUFFER_ALIGN(new_skb->data));
-
 		cur_p->app0 = STS_CTRL_APP0_IRQONEND;
 		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
 					     XTE_MAX_JUMBO_FRAME_SIZE,
@@ -664,7 +727,7 @@ static void ll_temac_recv(struct net_device *ndev)
 		cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
 		bdstat = cur_p->app0;
 	}
-	temac_dma_out32(lp, RX_TAILDESC_PTR, tail_p);
+	lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
 
 	spin_unlock_irqrestore(&lp->rx_lock, flags);
 }
@@ -675,8 +738,8 @@ static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
 	struct temac_local *lp = netdev_priv(ndev);
 	unsigned int status;
 
-	status = temac_dma_in32(lp, TX_IRQ_REG);
-	temac_dma_out32(lp, TX_IRQ_REG, status);
+	status = lp->dma_in(lp, TX_IRQ_REG);
+	lp->dma_out(lp, TX_IRQ_REG, status);
 
 	if (status & (IRQ_COAL | IRQ_DLY))
 		temac_start_xmit_done(lp->ndev);
@@ -693,8 +756,8 @@ static irqreturn_t ll_temac_rx_irq(int irq, void *_ndev)
 	unsigned int status;
 
 	/* Read and clear the status registers */
-	status = temac_dma_in32(lp, RX_IRQ_REG);
-	temac_dma_out32(lp, RX_IRQ_REG, status);
+	status = lp->dma_in(lp, RX_IRQ_REG);
+	lp->dma_out(lp, RX_IRQ_REG, status);
 
 	if (status & (IRQ_COAL | IRQ_DLY))
 		ll_temac_recv(lp->ndev);
@@ -795,7 +858,7 @@ static ssize_t temac_show_llink_regs(struct device *dev,
 	int i, len = 0;
 
 	for (i = 0; i < 0x11; i++)
-		len += sprintf(buf + len, "%.8x%s", temac_dma_in32(lp, i),
+		len += sprintf(buf + len, "%.8x%s", lp->dma_in(lp, i),
 			       (i % 8) == 7 ? "\n" : " ");
 	len += sprintf(buf + len, "\n");
 
@@ -821,7 +884,6 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
 	struct net_device *ndev;
 	const void *addr;
 	int size, rc = 0;
-	unsigned int dcrs;
 
 	/* Init network device structure */
 	ndev = alloc_etherdev(sizeof(*lp));
@@ -871,13 +933,20 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
 		goto nodev;
 	}
 
-	dcrs = dcr_resource_start(np, 0);
-	if (dcrs == 0) {
-		dev_err(&op->dev, "could not get DMA register address\n");
-		goto nodev;
+	/* Setup the DMA register accesses, could be DCR or memory mapped */
+	if (temac_dcr_setup(lp, op, np)) {
+
+		/* no DCR in the device tree, try non-DCR */
+		lp->sdma_regs = of_iomap(np, 0);
+		if (lp->sdma_regs) {
+			lp->dma_in = temac_dma_in32;
+			lp->dma_out = temac_dma_out32;
+			dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
+		} else {
+			dev_err(&op->dev, "unable to map DMA registers\n");
+			goto nodev;
+		}
 	}
-	lp->sdma_dcrs = dcr_map(np, dcrs, dcr_resource_len(np, 0));
-	dev_dbg(&op->dev, "DCR base: %x\n", dcrs);
 
 	lp->rx_irq = irq_of_parse_and_map(np, 0);
 	lp->tx_irq = irq_of_parse_and_map(np, 1);
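
The ll_temac changes replace hard-wired DCR accessors with dma_in/dma_out function pointers bound once at probe time, so a single driver binary serves both the DCR-based PowerPC 440 and the memory-mapped PowerPC 405 and MicroBlaze. The shape of the dispatch, reduced to a sketch (types simplified; in_be32()/out_be32() as available on those platforms):

struct mydev {
	void __iomem *sdma_regs;
	u32 (*dma_in)(struct mydev *, int);
	void (*dma_out)(struct mydev *, int, u32);
};

static u32 mydev_mmio_in(struct mydev *d, int reg)
{
	/* DCR word addresses become byte offsets: reg << 2 */
	return in_be32((u32 __iomem *)(d->sdma_regs + (reg << 2)));
}

static void mydev_mmio_out(struct mydev *d, int reg, u32 val)
{
	out_be32((u32 __iomem *)(d->sdma_regs + (reg << 2)), val);
}

static void mydev_bind_mmio(struct mydev *d)
{
	/* probe would bind DCR variants here when the device tree has them */
	d->dma_in = mydev_mmio_in;
	d->dma_out = mydev_mmio_out;
}

Callers then never care which bus carries the access: lp->dma_out(lp, TX_TAILDESC_PTR, tail_p) behaves identically either way.
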
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index 3e3cc04defd0..72379c5439dc 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -1256,7 +1256,7 @@ static void set_multicast_list(struct net_device *dev) {
 		       dev->name, netdev_mc_count(dev));
 
 	if (!netdev_mc_empty(dev)) {
-		struct dev_mc_list *dmi;
+		struct netdev_hw_addr *ha;
 		char *cp;
 		cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
 				netdev_mc_count(dev) * 6, GFP_ATOMIC);
@@ -1267,8 +1267,8 @@ static void set_multicast_list(struct net_device *dev) {
 		cmd->command = CmdMulticastList;
 		*((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
 		cp = ((char *)(cmd + 1))+2;
-		netdev_for_each_mc_addr(dmi, dev) {
-			memcpy(cp, dmi->dmi_addr, 6);
+		netdev_for_each_mc_addr(ha, dev) {
+			memcpy(cp, ha->addr, 6);
 			cp += 6;
 		}
 		if (i596_debug & LOG_SRCDST)
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index c8a18a6203c8..cf7debc865b3 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -793,6 +793,7 @@ static void macb_init_hw(struct macb *bp)
 	config = macb_readl(bp, NCFGR) & MACB_BF(CLK, -1L);
 	config |= MACB_BIT(PAE);		/* PAuse Enable */
 	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
+	config |= MACB_BIT(BIG);		/* Receive oversized frames */
 	if (bp->dev->flags & IFF_PROMISC)
 		config |= MACB_BIT(CAF);	/* Copy All Frames */
 	if (!(bp->dev->flags & IFF_BROADCAST))
@@ -882,15 +883,15 @@ static int hash_get_index(__u8 *addr)
  */
 static void macb_sethashtable(struct net_device *dev)
 {
-	struct dev_mc_list *curr;
+	struct netdev_hw_addr *ha;
 	unsigned long mc_filter[2];
 	unsigned int bitnr;
 	struct macb *bp = netdev_priv(dev);
 
 	mc_filter[0] = mc_filter[1] = 0;
 
-	netdev_for_each_mc_addr(curr, dev) {
-		bitnr = hash_get_index(curr->dmi_addr);
+	netdev_for_each_mc_addr(ha, dev) {
+		bitnr = hash_get_index(ha->addr);
 		mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
 	}
 
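
macb stores its 64-entry multicast hash as two 32-bit words, so bitnr >> 5 selects the word and bitnr & 31 the bit inside it; only the iterator type changes in the hunk above. The indexing as a one-function sketch, assuming hash_get_index() returns 0..63 as in the driver:

static void set_hash_bit(unsigned long mc_filter[2], unsigned int bitnr)
{
	mc_filter[bitnr >> 5] |= 1UL << (bitnr & 31);
}
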
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 962c41d0c8df..b6855a6476f8 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -599,7 +599,7 @@ static void mace_set_multicast(struct net_device *dev)
 	    mp->maccc |= PROM;
 	} else {
 	    unsigned char multicast_filter[8];
-	    struct dev_mc_list *dmi;
+	    struct netdev_hw_addr *ha;
 
 	    if (dev->flags & IFF_ALLMULTI) {
 		for (i = 0; i < 8; i++)
@@ -607,8 +607,8 @@ static void mace_set_multicast(struct net_device *dev)
 	    } else {
 		for (i = 0; i < 8; i++)
 		    multicast_filter[i] = 0;
-		netdev_for_each_mc_addr(dmi, dev) {
-		    crc = ether_crc_le(6, dmi->dmi_addr);
+		netdev_for_each_mc_addr(ha, dev) {
+		    crc = ether_crc_le(6, ha->addr);
 		    i = crc >> 26;	/* bit number in multicast_filter */
 		    multicast_filter[i >> 3] |= 1 << (i & 7);
 		}
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 52e9a51c4c4f..a6e19fc8a80a 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -509,7 +509,7 @@ static void mace_set_multicast(struct net_device *dev)
 		mb->maccc |= PROM;
 	} else {
 		unsigned char multicast_filter[8];
-		struct dev_mc_list *dmi;
+		struct netdev_hw_addr *ha;
 
 		if (dev->flags & IFF_ALLMULTI) {
 			for (i = 0; i < 8; i++) {
@@ -518,8 +518,8 @@ static void mace_set_multicast(struct net_device *dev)
 		} else {
 			for (i = 0; i < 8; i++)
 				multicast_filter[i] = 0;
-			netdev_for_each_mc_addr(dmi, dev) {
-				crc = ether_crc_le(6, dmi->dmi_addr);
+			netdev_for_each_mc_addr(ha, dev) {
+				crc = ether_crc_le(6, ha->addr);
 				/* bit number in multicast_filter */
 				i = crc >> 26;
 				multicast_filter[i >> 3] |= 1 << (i & 7);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 40faa368b07a..9a939d828b47 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -282,7 +282,7 @@ static int macvlan_open(struct net_device *dev)
 	if (macvlan_addr_busy(vlan->port, dev->dev_addr))
 		goto out;
 
-	err = dev_unicast_add(lowerdev, dev->dev_addr);
+	err = dev_uc_add(lowerdev, dev->dev_addr);
 	if (err < 0)
 		goto out;
 	if (dev->flags & IFF_ALLMULTI) {
@@ -294,7 +294,7 @@ static int macvlan_open(struct net_device *dev)
 	return 0;
 
 del_unicast:
-	dev_unicast_delete(lowerdev, dev->dev_addr);
+	dev_uc_del(lowerdev, dev->dev_addr);
 out:
 	return err;
 }
@@ -308,7 +308,7 @@ static int macvlan_stop(struct net_device *dev)
 	if (dev->flags & IFF_ALLMULTI)
 		dev_set_allmulti(lowerdev, -1);
 
-	dev_unicast_delete(lowerdev, dev->dev_addr);
+	dev_uc_del(lowerdev, dev->dev_addr);
 
 	macvlan_hash_del(vlan);
 	return 0;
@@ -332,11 +332,11 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p)
 		if (macvlan_addr_busy(vlan->port, addr->sa_data))
 			return -EBUSY;
 
-		err = dev_unicast_add(lowerdev, addr->sa_data);
+		err = dev_uc_add(lowerdev, addr->sa_data);
 		if (err)
 			return err;
 
-		dev_unicast_delete(lowerdev, dev->dev_addr);
+		dev_uc_del(lowerdev, dev->dev_addr);
 
 		macvlan_hash_change_addr(vlan, addr->sa_data);
 	}
@@ -748,6 +748,9 @@ static int macvlan_device_event(struct notifier_block *unused,
 		list_for_each_entry_safe(vlan, next, &port->vlans, list)
 			vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
 		break;
+	case NETDEV_PRE_TYPE_CHANGE:
+		/* Forbid underlaying device to change its type. */
+		return NOTIFY_BAD;
 	}
 	return NOTIFY_DONE;
 }
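
The macvlan hunks follow a kernel-wide rename of the unicast filter helpers: dev_unicast_add()/dev_unicast_delete() became dev_uc_add()/dev_uc_del(), with the calling convention unchanged. The usual paired lifetime, sketched with illustrative names:

static int mydev_open(struct net_device *dev, struct net_device *lowerdev)
{
	/* register our MAC with the lower device's unicast filter */
	return dev_uc_add(lowerdev, dev->dev_addr);
}

static void mydev_stop(struct net_device *dev, struct net_device *lowerdev)
{
	/* must mirror the add, or the lower device leaks a filter entry */
	dev_uc_del(lowerdev, dev->dev_addr);
}
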
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index abba3cc81f12..85d6420f8404 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -246,8 +246,8 @@ static void macvtap_sock_write_space(struct sock *sk)
 	    !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
 		return;
 
-	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-		wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND);
+	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
+		wake_up_interruptible_poll(sk_sleep(sk), POLLOUT | POLLWRNORM | POLLWRBAND);
 }
 
 static int macvtap_open(struct inode *inode, struct file *file)
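
The macvtap.c hunk tracks the introduction of the sk_sleep() accessor, which replaces direct dereferences of sk->sk_sleep; wakeups use the same wait-queue head, just obtained through the helper. A minimal sketch:

#include <net/sock.h>

static void wake_writers(struct sock *sk)
{
	/* sk_sleep(sk) returns the socket's wait queue head */
	if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
		wake_up_interruptible(sk_sleep(sk));
}
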
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 73c3d20c6453..96180c0ec206 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -161,39 +161,29 @@ static void mlx4_en_do_set_mac(struct work_struct *work)
 static void mlx4_en_clear_list(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct dev_mc_list *plist = priv->mc_list;
-	struct dev_mc_list *next;
 
-	while (plist) {
-		next = plist->next;
-		kfree(plist);
-		plist = next;
-	}
-	priv->mc_list = NULL;
+	kfree(priv->mc_addrs);
+	priv->mc_addrs_cnt = 0;
 }
 
 static void mlx4_en_cache_mclist(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct dev_mc_list *mclist;
-	struct dev_mc_list *tmp;
-	struct dev_mc_list *plist = NULL;
-
-	for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
-		tmp = kmalloc(sizeof(struct dev_mc_list), GFP_ATOMIC);
-		if (!tmp) {
+	struct netdev_hw_addr *ha;
+	char *mc_addrs;
+	int mc_addrs_cnt = netdev_mc_count(dev);
+	int i;
+
+	mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC);
+	if (!mc_addrs) {
 		en_err(priv, "failed to allocate multicast list\n");
-			mlx4_en_clear_list(dev);
-			return;
-		}
-		memcpy(tmp, mclist, sizeof(struct dev_mc_list));
-		tmp->next = NULL;
-		if (plist)
-			plist->next = tmp;
-		else
-			priv->mc_list = tmp;
-		plist = tmp;
+		return;
 	}
+	i = 0;
+	netdev_for_each_mc_addr(ha, dev)
+		memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
+	priv->mc_addrs = mc_addrs;
+	priv->mc_addrs_cnt = mc_addrs_cnt;
 }
 
 
@@ -213,7 +203,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 						 mcast_task);
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct net_device *dev = priv->dev;
-	struct dev_mc_list *mclist;
 	u64 mcast_addr = 0;
 	int err;
 
@@ -289,6 +278,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		if (err)
 			en_err(priv, "Failed disabling multicast filter\n");
 	} else {
+		int i;
+
 		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
 					  0, MLX4_MCAST_DISABLE);
 		if (err)
@@ -303,8 +294,9 @@ static void mlx4_en_do_set_multicast(struct work_struct *work)
 		netif_tx_lock_bh(dev);
 		mlx4_en_cache_mclist(dev);
 		netif_tx_unlock_bh(dev);
-		for (mclist = priv->mc_list; mclist; mclist = mclist->next) {
-			mcast_addr = mlx4_en_mac_to_u64(mclist->dmi_addr);
+		for (i = 0; i < priv->mc_addrs_cnt; i++) {
+			mcast_addr =
+				mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN);
 			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
 					    mcast_addr, 0, MLX4_MCAST_CONFIG);
 		}
@@ -512,7 +504,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
 
 	err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
 	if (err)
-		en_dbg(HW, priv, "Could not update stats \n");
+		en_dbg(HW, priv, "Could not update stats\n");
 
 	mutex_lock(&mdev->state_lock);
 	if (mdev->device_up) {
@@ -985,7 +977,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->flags = prof->flags;
 	priv->tx_ring_num = prof->tx_ring_num;
 	priv->rx_ring_num = prof->rx_ring_num;
-	priv->mc_list = NULL;
 	priv->mac_index = -1;
 	priv->msg_enable = MLX4_EN_MSG_LEVEL;
 	spin_lock_init(&priv->stats_lock);
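
The mlx4_en rework drops the hand-copied dev_mc_list chain for a flat byte array: one kmalloc() of count * ETH_ALEN, one walk to fill it, one kfree() to drop it, with entry i at mc_addrs + i * ETH_ALEN. The caching step reduced to a sketch (field names mirror the patch; error handling trimmed):

#include <linux/etherdevice.h>
#include <linux/slab.h>

static char *cache_mc_addrs(struct net_device *dev, int *cnt)
{
	struct netdev_hw_addr *ha;
	int n = netdev_mc_count(dev);
	char *addrs;
	int i = 0;

	addrs = kmalloc(n * ETH_ALEN, GFP_ATOMIC);
	if (!addrs)
		return NULL;
	netdev_for_each_mc_addr(ha, dev)
		memcpy(addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
	*cnt = n;
	return addrs;
}
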
diff --git a/drivers/net/mlx4/mlx4_en.h b/drivers/net/mlx4/mlx4_en.h
index 82c3ebc584e3..b55e46c8b682 100644
--- a/drivers/net/mlx4/mlx4_en.h
+++ b/drivers/net/mlx4/mlx4_en.h
@@ -492,7 +492,8 @@ struct mlx4_en_priv {
 	struct mlx4_en_perf_stats pstats;
 	struct mlx4_en_pkt_stats pkstats;
 	struct mlx4_en_port_stats port_stats;
-	struct dev_mc_list *mc_list;
+	char *mc_addrs;
+	int mc_addrs_cnt;
 	struct mlx4_en_stat_out_mbox hw_stats;
 };
 
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index 8613a52ddf17..4ee9d04f6599 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1770,7 +1770,7 @@ static void mv643xx_eth_program_multicast_filter(struct net_device *dev)
 	struct mv643xx_eth_private *mp = netdev_priv(dev);
 	u32 *mc_spec;
 	u32 *mc_other;
-	struct dev_addr_list *addr;
+	struct netdev_hw_addr *ha;
 	int i;
 
 	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
@@ -1795,8 +1795,8 @@ oom:
 	memset(mc_spec, 0, 0x100);
 	memset(mc_other, 0, 0x100);
 
-	netdev_for_each_mc_addr(addr, dev) {
-		u8 *a = addr->da_addr;
+	netdev_for_each_mc_addr(ha, dev) {
+		u8 *a = ha->addr;
 		u32 *table;
 		int entry;
 
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index ecde0876a785..e0b47cc8a86e 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -110,15 +110,15 @@ MODULE_LICENSE("Dual BSD/GPL");
 struct myri10ge_rx_buffer_state {
 	struct page *page;
 	int page_offset;
-	DECLARE_PCI_UNMAP_ADDR(bus)
-	DECLARE_PCI_UNMAP_LEN(len)
+	DEFINE_DMA_UNMAP_ADDR(bus);
+	DEFINE_DMA_UNMAP_LEN(len);
 };
 
 struct myri10ge_tx_buffer_state {
 	struct sk_buff *skb;
 	int last;
-	DECLARE_PCI_UNMAP_ADDR(bus)
-	DECLARE_PCI_UNMAP_LEN(len)
+	DEFINE_DMA_UNMAP_ADDR(bus);
+	DEFINE_DMA_UNMAP_LEN(len);
 };
 
 struct myri10ge_cmd {
@@ -1234,7 +1234,7 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
 	rx->info[idx].page_offset = rx->page_offset;
 	/* note that this is the address of the start of the
 	 * page */
-	pci_unmap_addr_set(&rx->info[idx], bus, rx->bus);
+	dma_unmap_addr_set(&rx->info[idx], bus, rx->bus);
 	rx->shadow[idx].addr_low =
 	    htonl(MYRI10GE_LOWPART_TO_U32(rx->bus) + rx->page_offset);
 	rx->shadow[idx].addr_high =
@@ -1266,7 +1266,7 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev,
 	/* unmap the recvd page if we're the only or last user of it */
 	if (bytes >= MYRI10GE_ALLOC_SIZE / 2 ||
 	    (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) {
-		pci_unmap_page(pdev, (pci_unmap_addr(info, bus)
+		pci_unmap_page(pdev, (dma_unmap_addr(info, bus)
 				      & ~(MYRI10GE_ALLOC_SIZE - 1)),
 			       MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
 	}
@@ -1373,21 +1373,21 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index)
 			tx->info[idx].last = 0;
 		}
 		tx->done++;
-		len = pci_unmap_len(&tx->info[idx], len);
-		pci_unmap_len_set(&tx->info[idx], len, 0);
+		len = dma_unmap_len(&tx->info[idx], len);
+		dma_unmap_len_set(&tx->info[idx], len, 0);
 		if (skb) {
 			ss->stats.tx_bytes += skb->len;
 			ss->stats.tx_packets++;
 			dev_kfree_skb_irq(skb);
 			if (len)
 				pci_unmap_single(pdev,
-						 pci_unmap_addr(&tx->info[idx],
+						 dma_unmap_addr(&tx->info[idx],
 								bus), len,
 						 PCI_DMA_TODEVICE);
 		} else {
 			if (len)
 				pci_unmap_page(pdev,
-					       pci_unmap_addr(&tx->info[idx],
+					       dma_unmap_addr(&tx->info[idx],
 							      bus), len,
 					       PCI_DMA_TODEVICE);
 		}
@@ -2094,20 +2094,20 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss)
 			/* Mark as free */
 			tx->info[idx].skb = NULL;
 			tx->done++;
-			len = pci_unmap_len(&tx->info[idx], len);
-			pci_unmap_len_set(&tx->info[idx], len, 0);
+			len = dma_unmap_len(&tx->info[idx], len);
+			dma_unmap_len_set(&tx->info[idx], len, 0);
 			if (skb) {
 				ss->stats.tx_dropped++;
 				dev_kfree_skb_any(skb);
 				if (len)
 					pci_unmap_single(mgp->pdev,
-							 pci_unmap_addr(&tx->info[idx],
+							 dma_unmap_addr(&tx->info[idx],
 									bus), len,
 							 PCI_DMA_TODEVICE);
 			} else {
 				if (len)
 					pci_unmap_page(mgp->pdev,
-						       pci_unmap_addr(&tx->info[idx],
+						       dma_unmap_addr(&tx->info[idx],
 								      bus), len,
 						       PCI_DMA_TODEVICE);
 			}
@@ -2757,12 +2757,12 @@ again:
 	}
 
 	/* map the skb for DMA */
-	len = skb->len - skb->data_len;
+	len = skb_headlen(skb);
 	idx = tx->req & tx->mask;
 	tx->info[idx].skb = skb;
 	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-	pci_unmap_addr_set(&tx->info[idx], bus, bus);
-	pci_unmap_len_set(&tx->info[idx], len, len);
+	dma_unmap_addr_set(&tx->info[idx], bus, bus);
+	dma_unmap_len_set(&tx->info[idx], len, len);
 
 	frag_cnt = skb_shinfo(skb)->nr_frags;
 	frag_idx = 0;
@@ -2865,8 +2865,8 @@ again:
 		len = frag->size;
 		bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
 				   len, PCI_DMA_TODEVICE);
-		pci_unmap_addr_set(&tx->info[idx], bus, bus);
-		pci_unmap_len_set(&tx->info[idx], len, len);
+		dma_unmap_addr_set(&tx->info[idx], bus, bus);
+		dma_unmap_len_set(&tx->info[idx], len, len);
 	}
 
 	(req - rdma_count)->rdma_count = rdma_count;
@@ -2903,19 +2903,19 @@ abort_linearize:
 	idx = tx->req & tx->mask;
 	tx->info[idx].skb = NULL;
 	do {
-		len = pci_unmap_len(&tx->info[idx], len);
+		len = dma_unmap_len(&tx->info[idx], len);
 		if (len) {
 			if (tx->info[idx].skb != NULL)
 				pci_unmap_single(mgp->pdev,
-						 pci_unmap_addr(&tx->info[idx],
+						 dma_unmap_addr(&tx->info[idx],
 								bus), len,
 						 PCI_DMA_TODEVICE);
 			else
 				pci_unmap_page(mgp->pdev,
-					       pci_unmap_addr(&tx->info[idx],
+					       dma_unmap_addr(&tx->info[idx],
 							      bus), len,
 					       PCI_DMA_TODEVICE);
-			pci_unmap_len_set(&tx->info[idx], len, 0);
+			dma_unmap_len_set(&tx->info[idx], len, 0);
 			tx->info[idx].skb = NULL;
 		}
 		idx = (idx + 1) & tx->mask;
@@ -3002,7 +3002,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
3002{ 3002{
3003 struct myri10ge_priv *mgp = netdev_priv(dev); 3003 struct myri10ge_priv *mgp = netdev_priv(dev);
3004 struct myri10ge_cmd cmd; 3004 struct myri10ge_cmd cmd;
3005 struct dev_mc_list *mc_list; 3005 struct netdev_hw_addr *ha;
3006 __be32 data[2] = { 0, 0 }; 3006 __be32 data[2] = { 0, 0 };
3007 int err; 3007 int err;
3008 3008
@@ -3039,8 +3039,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
3039 } 3039 }
3040 3040
3041 /* Walk the multicast list, and add each address */ 3041 /* Walk the multicast list, and add each address */
3042 netdev_for_each_mc_addr(mc_list, dev) { 3042 netdev_for_each_mc_addr(ha, dev) {
3043 memcpy(data, &mc_list->dmi_addr, 6); 3043 memcpy(data, &ha->addr, 6);
3044 cmd.data0 = ntohl(data[0]); 3044 cmd.data0 = ntohl(data[0]);
3045 cmd.data1 = ntohl(data[1]); 3045 cmd.data1 = ntohl(data[1]);
3046 err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP, 3046 err = myri10ge_send_cmd(mgp, MXGEFW_JOIN_MULTICAST_GROUP,
@@ -3048,7 +3048,7 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
3048 3048
3049 if (err != 0) { 3049 if (err != 0) {
3050 netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n", 3050 netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
3051 err, mc_list->dmi_addr); 3051 err, ha->addr);
3052 goto abort; 3052 goto abort;
3053 } 3053 }
3054 } 3054 }
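
The myri10ge hunks above are one instance of a tree-wide rename: the
PCI-specific pci_unmap_addr/pci_unmap_len accessor macros give way to
the bus-agnostic dma_unmap_addr/dma_unmap_len family from
include/linux/dma-mapping.h. A minimal sketch of the pattern (the struct
and helper names here are hypothetical, not myri10ge's):

#include <linux/dma-mapping.h>

struct tx_slot {
	struct sk_buff *skb;
	DEFINE_DMA_UNMAP_ADDR(bus);	/* compiles to nothing when unmap needs no state */
	DEFINE_DMA_UNMAP_LEN(len);
};

static void tx_slot_save(struct tx_slot *slot, dma_addr_t bus, unsigned int len)
{
	dma_unmap_addr_set(slot, bus, bus);
	dma_unmap_len_set(slot, len, len);
}

static void tx_slot_unmap(struct pci_dev *pdev, struct tx_slot *slot)
{
	unsigned int len = dma_unmap_len(slot, len);

	if (len)
		pci_unmap_single(pdev, dma_unmap_addr(slot, bus),
				 len, PCI_DMA_TODEVICE);
	dma_unmap_len_set(slot, len, 0);
}
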
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index e52038783245..9250bf6573ec 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -2493,12 +2493,12 @@ static void __set_rx_mode(struct net_device *dev)
2493 rx_mode = RxFilterEnable | AcceptBroadcast 2493 rx_mode = RxFilterEnable | AcceptBroadcast
2494 | AcceptAllMulticast | AcceptMyPhys; 2494 | AcceptAllMulticast | AcceptMyPhys;
2495 } else { 2495 } else {
2496 struct dev_mc_list *mclist; 2496 struct netdev_hw_addr *ha;
2497 int i; 2497 int i;
2498 2498
2499 memset(mc_filter, 0, sizeof(mc_filter)); 2499 memset(mc_filter, 0, sizeof(mc_filter));
2500 netdev_for_each_mc_addr(mclist, dev) { 2500 netdev_for_each_mc_addr(ha, dev) {
2501 int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff; 2501 int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
2502 mc_filter[b/8] |= (1 << (b & 0x07)); 2502 mc_filter[b/8] |= (1 << (b & 0x07));
2503 } 2503 }
2504 rx_mode = RxFilterEnable | AcceptBroadcast 2504 rx_mode = RxFilterEnable | AcceptBroadcast
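
The natsemi conversion above is the recurring multicast change in this
series: struct dev_mc_list and its dmi_addr field are replaced by struct
netdev_hw_addr and its addr field, walked with the same
netdev_for_each_mc_addr() iterator. A self-contained sketch of the hash
walk (the 512-bit filter layout mirrors the natsemi code above; the
function name is hypothetical):

static void build_mc_hash(struct net_device *dev, u8 *mc_filter /* 64 bytes */)
{
	struct netdev_hw_addr *ha;

	memset(mc_filter, 0, 64);
	netdev_for_each_mc_addr(ha, dev) {
		/* take 9 bits of the Ethernet CRC as the bucket index */
		int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;

		mc_filter[b / 8] |= 1 << (b & 0x07);
	}
}
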
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 0f703838e21a..174ac8ef82fa 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -420,7 +420,6 @@ struct status_desc {
420} __attribute__ ((aligned(16))); 420} __attribute__ ((aligned(16)));
421 421
422/* UNIFIED ROMIMAGE *************************/ 422/* UNIFIED ROMIMAGE *************************/
423#define NX_UNI_FW_MIN_SIZE 0xc8000
424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0 423#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
425#define NX_UNI_DIR_SECT_BOOTLD 0x6 424#define NX_UNI_DIR_SECT_BOOTLD 0x6
426#define NX_UNI_DIR_SECT_FW 0x7 425#define NX_UNI_DIR_SECT_FW 0x7
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index f8499e56cbee..aecba787f7c8 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -703,6 +703,11 @@ netxen_nic_get_ethtool_stats(struct net_device *dev,
703 } 703 }
704} 704}
705 705
706static u32 netxen_nic_get_tx_csum(struct net_device *dev)
707{
708 return dev->features & NETIF_F_IP_CSUM;
709}
710
706static u32 netxen_nic_get_rx_csum(struct net_device *dev) 711static u32 netxen_nic_get_rx_csum(struct net_device *dev)
707{ 712{
708 struct netxen_adapter *adapter = netdev_priv(dev); 713 struct netxen_adapter *adapter = netdev_priv(dev);
@@ -909,6 +914,7 @@ const struct ethtool_ops netxen_nic_ethtool_ops = {
909 .set_ringparam = netxen_nic_set_ringparam, 914 .set_ringparam = netxen_nic_set_ringparam,
910 .get_pauseparam = netxen_nic_get_pauseparam, 915 .get_pauseparam = netxen_nic_get_pauseparam,
911 .set_pauseparam = netxen_nic_set_pauseparam, 916 .set_pauseparam = netxen_nic_set_pauseparam,
917 .get_tx_csum = netxen_nic_get_tx_csum,
912 .set_tx_csum = ethtool_op_set_tx_csum, 918 .set_tx_csum = ethtool_op_set_tx_csum,
913 .set_sg = ethtool_op_set_sg, 919 .set_sg = ethtool_op_set_sg,
914 .get_tso = netxen_nic_get_tso, 920 .get_tso = netxen_nic_get_tso,
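
The new .get_tx_csum hook above makes ethtool report TX checksumming
from the one feature bit this driver actually toggles, NETIF_F_IP_CSUM.
A minimal sketch of the pairing (names hypothetical); note the hunk
returns the raw masked value, which ethtool userspace treats the same
as a 0/1:

static u32 example_get_tx_csum(struct net_device *dev)
{
	return (dev->features & NETIF_F_IP_CSUM) != 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_tx_csum	= example_get_tx_csum,
	.set_tx_csum	= ethtool_op_set_tx_csum,	/* generic core helper */
};
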
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index b1cf46a0c48c..5e5fe2fd6397 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -538,7 +538,7 @@ netxen_nic_set_mcast_addr(struct netxen_adapter *adapter,
538void netxen_p2_nic_set_multi(struct net_device *netdev) 538void netxen_p2_nic_set_multi(struct net_device *netdev)
539{ 539{
540 struct netxen_adapter *adapter = netdev_priv(netdev); 540 struct netxen_adapter *adapter = netdev_priv(netdev);
541 struct dev_mc_list *mc_ptr; 541 struct netdev_hw_addr *ha;
542 u8 null_addr[6]; 542 u8 null_addr[6];
543 int i; 543 int i;
544 544
@@ -572,8 +572,8 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
572 netxen_nic_enable_mcast_filter(adapter); 572 netxen_nic_enable_mcast_filter(adapter);
573 573
574 i = 0; 574 i = 0;
575 netdev_for_each_mc_addr(mc_ptr, netdev) 575 netdev_for_each_mc_addr(ha, netdev)
576 netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr); 576 netxen_nic_set_mcast_addr(adapter, i++, ha->addr);
577 577
578 /* Clear out remaining addresses */ 578 /* Clear out remaining addresses */
579 while (i < adapter->max_mc_count) 579 while (i < adapter->max_mc_count)
@@ -681,7 +681,7 @@ static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
681void netxen_p3_nic_set_multi(struct net_device *netdev) 681void netxen_p3_nic_set_multi(struct net_device *netdev)
682{ 682{
683 struct netxen_adapter *adapter = netdev_priv(netdev); 683 struct netxen_adapter *adapter = netdev_priv(netdev);
684 struct dev_mc_list *mc_ptr; 684 struct netdev_hw_addr *ha;
685 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 685 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
686 u32 mode = VPORT_MISS_MODE_DROP; 686 u32 mode = VPORT_MISS_MODE_DROP;
687 LIST_HEAD(del_list); 687 LIST_HEAD(del_list);
@@ -708,8 +708,8 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
708 } 708 }
709 709
710 if (!netdev_mc_empty(netdev)) { 710 if (!netdev_mc_empty(netdev)) {
711 netdev_for_each_mc_addr(mc_ptr, netdev) 711 netdev_for_each_mc_addr(ha, netdev)
712 nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list); 712 nx_p3_nic_add_mac(adapter, ha->addr, &del_list);
713 } 713 }
714 714
715send_fw_cmd: 715send_fw_cmd:
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 02876f59cbb2..388feaf60ee7 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -614,22 +614,123 @@ static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
614 return NULL; 614 return NULL;
615} 615}
616 616
617#define QLCNIC_FILEHEADER_SIZE (14 * 4)
618
617static int 619static int
618nx_set_product_offs(struct netxen_adapter *adapter) 620netxen_nic_validate_header(struct netxen_adapter *adapter)
619{ 621 {
620 struct uni_table_desc *ptab_descr;
621 const u8 *unirom = adapter->fw->data; 622 const u8 *unirom = adapter->fw->data;
622 uint32_t i; 623 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
624 u32 fw_file_size = adapter->fw->size;
625 u32 tab_size;
623 __le32 entries; 626 __le32 entries;
627 __le32 entry_size;
628
629 if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
630 return -EINVAL;
631
632 entries = cpu_to_le32(directory->num_entries);
633 entry_size = cpu_to_le32(directory->entry_size);
634 tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
635
636 if (fw_file_size < tab_size)
637 return -EINVAL;
638
639 return 0;
640}
641
642static int
643netxen_nic_validate_bootld(struct netxen_adapter *adapter)
644{
645 struct uni_table_desc *tab_desc;
646 struct uni_data_desc *descr;
647 const u8 *unirom = adapter->fw->data;
648 __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
649 NX_UNI_BOOTLD_IDX_OFF));
650 u32 offs;
651 u32 tab_size;
652 u32 data_size;
653
654 tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
655
656 if (!tab_desc)
657 return -EINVAL;
658
659 tab_size = cpu_to_le32(tab_desc->findex) +
660 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
661
662 if (adapter->fw->size < tab_size)
663 return -EINVAL;
664
665 offs = cpu_to_le32(tab_desc->findex) +
666 (cpu_to_le32(tab_desc->entry_size) * (idx));
667 descr = (struct uni_data_desc *)&unirom[offs];
668
669 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
670
671 if (adapter->fw->size < data_size)
672 return -EINVAL;
624 673
674 return 0;
675}
676
677static int
678netxen_nic_validate_fw(struct netxen_adapter *adapter)
679{
680 struct uni_table_desc *tab_desc;
681 struct uni_data_desc *descr;
682 const u8 *unirom = adapter->fw->data;
683 __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
684 NX_UNI_FIRMWARE_IDX_OFF));
685 u32 offs;
686 u32 tab_size;
687 u32 data_size;
688
689 tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
690
691 if (!tab_desc)
692 return -EINVAL;
693
694 tab_size = cpu_to_le32(tab_desc->findex) +
695 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
696
697 if (adapter->fw->size < tab_size)
698 return -EINVAL;
699
700 offs = cpu_to_le32(tab_desc->findex) +
701 (cpu_to_le32(tab_desc->entry_size) * (idx));
702 descr = (struct uni_data_desc *)&unirom[offs];
703 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
704
705 if (adapter->fw->size < data_size)
706 return -EINVAL;
707
708 return 0;
709}
710
711
712static int
713netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
714{
715 struct uni_table_desc *ptab_descr;
716 const u8 *unirom = adapter->fw->data;
625 int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ? 717 int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
626 1 : netxen_p3_has_mn(adapter); 718 1 : netxen_p3_has_mn(adapter);
719 __le32 entries;
720 __le32 entry_size;
721 u32 tab_size;
722 u32 i;
627 723
628 ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL); 724 ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
629 if (ptab_descr == NULL) 725 if (ptab_descr == NULL)
630 return -1; 726 return -EINVAL;
631 727
632 entries = cpu_to_le32(ptab_descr->num_entries); 728 entries = cpu_to_le32(ptab_descr->num_entries);
729 entry_size = cpu_to_le32(ptab_descr->entry_size);
730 tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
731
732 if (adapter->fw->size < tab_size)
733 return -EINVAL;
633 734
634nomn: 735nomn:
635 for (i = 0; i < entries; i++) { 736 for (i = 0; i < entries; i++) {
@@ -658,9 +759,38 @@ nomn:
658 goto nomn; 759 goto nomn;
659 } 760 }
660 761
661 return -1; 762 return -EINVAL;
662} 763}
663 764
765static int
766netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
767{
768 if (netxen_nic_validate_header(adapter)) {
769 dev_err(&adapter->pdev->dev,
770 "unified image: header validation failed\n");
771 return -EINVAL;
772 }
773
774 if (netxen_nic_validate_product_offs(adapter)) {
775 dev_err(&adapter->pdev->dev,
776 "unified image: product validation failed\n");
777 return -EINVAL;
778 }
779
780 if (netxen_nic_validate_bootld(adapter)) {
781 dev_err(&adapter->pdev->dev,
782 "unified image: bootld validation failed\n");
783 return -EINVAL;
784 }
785
786 if (netxen_nic_validate_fw(adapter)) {
787 dev_err(&adapter->pdev->dev,
788 "unified image: firmware validation failed\n");
789 return -EINVAL;
790 }
791
792 return 0;
793}
664 794
665static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter, 795static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
666 u32 section, u32 idx_offset) 796 u32 section, u32 idx_offset)
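
Each validator added above follows one guard pattern: locate a section's
table descriptor, compute the file offset just past the last entry (or
data block) it references, and reject the image when the firmware blob
is shorter than that, so later parsing never dereferences past fw->data.
A condensed sketch of the bounds check (field names as used above; the
sketch reads the little-endian fields with the conventional le32_to_cpu
accessor and does the arithmetic in 64 bits against overflow — the hunk
above uses cpu_to_le32 and 32-bit math, which is equivalent on
little-endian hosts):

static int check_table_entry(const struct firmware *fw,
			     const struct uni_table_desc *tab, u32 idx)
{
	u64 end = (u64)le32_to_cpu(tab->findex) +
		  (u64)le32_to_cpu(tab->entry_size) * (idx + 1);

	return end <= fw->size ? 0 : -EINVAL;
}
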
@@ -890,6 +1020,16 @@ netxen_load_firmware(struct netxen_adapter *adapter)
890 1020
891 flashaddr += 8; 1021 flashaddr += 8;
892 } 1022 }
1023
1024 size = (__force u32)nx_get_fw_size(adapter) % 8;
1025 if (size) {
1026 data = cpu_to_le64(ptr64[i]);
1027
1028 if (adapter->pci_mem_write(adapter,
1029 flashaddr, data))
1030 return -EIO;
1031 }
1032
893 } else { 1033 } else {
894 u64 data; 1034 u64 data;
895 u32 hi, lo; 1035 u32 hi, lo;
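
The block added above fixes loading of firmware images whose size is not
a multiple of 8: the surrounding loop copies whole 64-bit words, so
without it a trailing partial word was silently dropped. A reduced
sketch of the shape (helper signature hypothetical; like the hunk, it
reads a full 8 bytes at the tail, relying on the firmware buffer
extending past the last payload byte):

static int write_fw_words(struct netxen_adapter *adapter, u64 flashaddr,
			  const u64 *ptr64, u32 fw_size)
{
	u32 i, words = fw_size / 8;
	u64 data;

	for (i = 0; i < words; i++) {
		data = cpu_to_le64(ptr64[i]);
		if (adapter->pci_mem_write(adapter, flashaddr, data))
			return -EIO;
		flashaddr += 8;
	}

	if (fw_size % 8) {
		/* i now indexes the word holding the remaining tail bytes */
		data = cpu_to_le64(ptr64[i]);
		if (adapter->pci_mem_write(adapter, flashaddr, data))
			return -EIO;
	}

	return 0;
}
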
@@ -934,27 +1074,23 @@ static int
934netxen_validate_firmware(struct netxen_adapter *adapter) 1074netxen_validate_firmware(struct netxen_adapter *adapter)
935{ 1075{
936 __le32 val; 1076 __le32 val;
937 u32 ver, min_ver, bios, min_size; 1077 u32 ver, min_ver, bios;
938 struct pci_dev *pdev = adapter->pdev; 1078 struct pci_dev *pdev = adapter->pdev;
939 const struct firmware *fw = adapter->fw; 1079 const struct firmware *fw = adapter->fw;
940 u8 fw_type = adapter->fw_type; 1080 u8 fw_type = adapter->fw_type;
941 1081
942 if (fw_type == NX_UNIFIED_ROMIMAGE) { 1082 if (fw_type == NX_UNIFIED_ROMIMAGE) {
943 if (nx_set_product_offs(adapter)) 1083 if (netxen_nic_validate_unified_romimage(adapter))
944 return -EINVAL; 1084 return -EINVAL;
945
946 min_size = NX_UNI_FW_MIN_SIZE;
947 } else { 1085 } else {
948 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); 1086 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
949 if ((__force u32)val != NETXEN_BDINFO_MAGIC) 1087 if ((__force u32)val != NETXEN_BDINFO_MAGIC)
950 return -EINVAL; 1088 return -EINVAL;
951 1089
952 min_size = NX_FW_MIN_SIZE; 1090 if (fw->size < NX_FW_MIN_SIZE)
1091 return -EINVAL;
953 } 1092 }
954 1093
955 if (fw->size < min_size)
956 return -EINVAL;
957
958 val = nx_get_fw_version(adapter); 1094 val = nx_get_fw_version(adapter);
959 1095
960 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 1096 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index ce838f7c8b0f..b665b420a4f2 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -782,15 +782,22 @@ netxen_check_options(struct netxen_adapter *adapter)
782 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 782 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
783 adapter->msix_supported = !!use_msi_x; 783 adapter->msix_supported = !!use_msi_x;
784 adapter->rss_supported = !!use_msi_x; 784 adapter->rss_supported = !!use_msi_x;
785 } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) { 785 } else {
786 switch (adapter->ahw.board_type) { 786 u32 flashed_ver = 0;
787 case NETXEN_BRDTYPE_P2_SB31_10G: 787 netxen_rom_fast_read(adapter,
788 case NETXEN_BRDTYPE_P2_SB31_10G_CX4: 788 NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
789 adapter->msix_supported = !!use_msi_x; 789 flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
790 adapter->rss_supported = !!use_msi_x; 790
791 break; 791 if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
792 default: 792 switch (adapter->ahw.board_type) {
793 break; 793 case NETXEN_BRDTYPE_P2_SB31_10G:
794 case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
795 adapter->msix_supported = !!use_msi_x;
796 adapter->rss_supported = !!use_msi_x;
797 break;
798 default:
799 break;
800 }
794 } 801 }
795 } 802 }
796 803
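
The netxen_check_options() rework above stops gating MSI-X on the cached
adapter->fw_version (which can describe a firmware file that is not yet
running) and instead reads the version of the image actually sitting in
flash. NX_FW_VERSION_OFFSET, NETXEN_DECODE_VERSION() and
NETXEN_VERSION_CODE() are the driver's own helpers from netxen_nic.h;
the packing below is only an illustration of the shape such a comparison
assumes, not their exact definitions:

/* illustrative only: a monotonic u32 built from (major, minor, sub) so
 * that flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336) orders correctly;
 * note sub may exceed 255, so it gets the low 16 bits here */
#define EX_VERSION_CODE(major, minor, sub) \
	(((major) << 24) | ((minor) << 16) | (sub))
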
@@ -2304,6 +2311,7 @@ netxen_fwinit_work(struct work_struct *work)
2304 } 2311 }
2305 break; 2312 break;
2306 2313
2314 case NX_DEV_NEED_RESET:
2307 case NX_DEV_INITALIZING: 2315 case NX_DEV_INITALIZING:
2308 if (++adapter->fw_wait_cnt < FW_POLL_THRESH) { 2316 if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
2309 netxen_schedule_work(adapter, 2317 netxen_schedule_work(adapter,
@@ -2347,6 +2355,9 @@ netxen_detach_work(struct work_struct *work)
2347 2355
2348 ref_cnt = nx_decr_dev_ref_cnt(adapter); 2356 ref_cnt = nx_decr_dev_ref_cnt(adapter);
2349 2357
2358 if (ref_cnt == -EIO)
2359 goto err_ret;
2360
2350 delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY); 2361 delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
2351 2362
2352 adapter->fw_wait_cnt = 0; 2363 adapter->fw_wait_cnt = 0;
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index f7a8f707361e..b7837ebd9a7d 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -595,7 +595,7 @@ static int init586(struct net_device *dev)
595 struct iasetup_cmd_struct __iomem *ias_cmd; 595 struct iasetup_cmd_struct __iomem *ias_cmd;
596 struct tdr_cmd_struct __iomem *tdr_cmd; 596 struct tdr_cmd_struct __iomem *tdr_cmd;
597 struct mcsetup_cmd_struct __iomem *mc_cmd; 597 struct mcsetup_cmd_struct __iomem *mc_cmd;
598 struct dev_mc_list *dmi; 598 struct netdev_hw_addr *ha;
599 int num_addrs = netdev_mc_count(dev); 599 int num_addrs = netdev_mc_count(dev);
600 600
601 ptr = p->scb + 1; 601 ptr = p->scb + 1;
@@ -724,8 +724,8 @@ static int init586(struct net_device *dev)
724 writew(num_addrs * 6, &mc_cmd->mc_cnt); 724 writew(num_addrs * 6, &mc_cmd->mc_cnt);
725 725
726 i = 0; 726 i = 0;
727 netdev_for_each_mc_addr(dmi, dev) 727 netdev_for_each_mc_addr(ha, dev)
728 memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6); 728 memcpy_toio(mc_cmd->mc_list[i++], ha->addr, 6);
729 729
730 writew(make16(mc_cmd), &p->scb->cbl_offset); 730 writew(make16(mc_cmd), &p->scb->cbl_offset);
731 writeb(CUC_START, &p->scb->cmd_cuc); 731 writeb(CUC_START, &p->scb->cmd_cuc);
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d5cd16bfc907..493e25cf1014 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -6314,7 +6314,6 @@ static void niu_set_rx_mode(struct net_device *dev)
6314{ 6314{
6315 struct niu *np = netdev_priv(dev); 6315 struct niu *np = netdev_priv(dev);
6316 int i, alt_cnt, err; 6316 int i, alt_cnt, err;
6317 struct dev_addr_list *addr;
6318 struct netdev_hw_addr *ha; 6317 struct netdev_hw_addr *ha;
6319 unsigned long flags; 6318 unsigned long flags;
6320 u16 hash[16] = { 0, }; 6319 u16 hash[16] = { 0, };
@@ -6366,8 +6365,8 @@ static void niu_set_rx_mode(struct net_device *dev)
6366 for (i = 0; i < 16; i++) 6365 for (i = 0; i < 16; i++)
6367 hash[i] = 0xffff; 6366 hash[i] = 0xffff;
6368 } else if (!netdev_mc_empty(dev)) { 6367 } else if (!netdev_mc_empty(dev)) {
6369 netdev_for_each_mc_addr(addr, dev) { 6368 netdev_for_each_mc_addr(ha, dev) {
6370 u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr); 6369 u32 crc = ether_crc_le(ETH_ALEN, ha->addr);
6371 6370
6372 crc >>= 24; 6371 crc >>= 24;
6373 hash[crc >> 4] |= (1 << (15 - (crc & 0xf))); 6372 hash[crc >> 4] |= (1 << (15 - (crc & 0xf)));
@@ -9839,7 +9838,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9839 } 9838 }
9840 } 9839 }
9841 9840
9842 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM); 9841 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_GRO);
9843 9842
9844 np->regs = pci_ioremap_bar(pdev, 0); 9843 np->regs = pci_ioremap_bar(pdev, 0);
9845 if (!np->regs) { 9844 if (!np->regs) {
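
The niu feature-flag addition above opts the driver into generic receive
offload. GRO lives in the stack, so a driver's side of the contract is
small: advertise NETIF_F_GRO and hand received skbs to the GRO engine
from its NAPI poll path. A minimal sketch of that hand-off (assuming
receive completion runs in NAPI context; names hypothetical):

static void example_rx_complete(struct napi_struct *napi,
				struct net_device *netdev,
				struct sk_buff *skb)
{
	skb->protocol = eth_type_trans(skb, netdev);
	/* the GRO engine may coalesce same-flow skbs before the stack
	 * sees them, and falls back to normal receive otherwise */
	napi_gro_receive(napi, skb);
}
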
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 8aadc8e2ddd7..6b1d443f2ce5 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -317,7 +317,6 @@ good:
317 skb->protocol = eth_type_trans(skb, netdev); 317 skb->protocol = eth_type_trans(skb, netdev);
318 netdev->stats.rx_packets++; 318 netdev->stats.rx_packets++;
319 netdev->stats.rx_bytes += skb->len; 319 netdev->stats.rx_bytes += skb->len;
320 netdev->last_rx = jiffies;
321 netif_receive_skb(skb); 320 netif_receive_skb(skb);
322 rc = 0; 321 rc = 0;
323 } else if (re.s.code == RING_ENTRY_CODE_MORE) { 322 } else if (re.s.code == RING_ENTRY_CODE_MORE) {
@@ -475,7 +474,7 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
475 unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ 474 unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
476 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ 475 unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
477 struct octeon_mgmt_cam_state cam_state; 476 struct octeon_mgmt_cam_state cam_state;
478 struct dev_addr_list *list; 477 struct netdev_hw_addr *ha;
479 struct list_head *pos; 478 struct list_head *pos;
480 int available_cam_entries; 479 int available_cam_entries;
481 480
@@ -511,8 +510,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
511 } 510 }
512 } 511 }
513 if (multicast_mode == 0) { 512 if (multicast_mode == 0) {
514 netdev_for_each_mc_addr(list, netdev) 513 netdev_for_each_mc_addr(ha, netdev)
515 octeon_mgmt_cam_state_add(&cam_state, list->da_addr); 514 octeon_mgmt_cam_state_add(&cam_state, ha->addr);
516 } 515 }
517 516
518 517
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 36785853a149..dc3b4c7914fd 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -1813,12 +1813,12 @@ static void netdrv_set_rx_mode(struct net_device *dev)
1813 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1813 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1814 mc_filter[1] = mc_filter[0] = 0xffffffff; 1814 mc_filter[1] = mc_filter[0] = 0xffffffff;
1815 } else { 1815 } else {
1816 struct dev_mc_list *mclist; 1816 struct netdev_hw_addr *ha;
1817 1817
1818 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1818 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1819 mc_filter[1] = mc_filter[0] = 0; 1819 mc_filter[1] = mc_filter[0] = 0;
1820 netdev_for_each_mc_addr(mclist, dev) { 1820 netdev_for_each_mc_addr(ha, dev) {
1821 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 1821 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1822 1822
1823 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1823 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1824 } 1824 }
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 091e0b00043e..580977f56ad0 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -1,20 +1,20 @@
1/*====================================================================== 1/*======================================================================
2 2
3 A PCMCIA ethernet driver for the 3com 3c589 card. 3 A PCMCIA ethernet driver for the 3com 3c589 card.
4 4
5 Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net 5 Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
6 6
7 3c589_cs.c 1.162 2001/10/13 00:08:50 7 3c589_cs.c 1.162 2001/10/13 00:08:50
8 8
9 The network driver code is based on Donald Becker's 3c589 code: 9 The network driver code is based on Donald Becker's 3c589 code:
10 10
11 Written 1994 by Donald Becker. 11 Written 1994 by Donald Becker.
12 Copyright 1993 United States Government as represented by the 12 Copyright 1993 United States Government as represented by the
13 Director, National Security Agency. This software may be used and 13 Director, National Security Agency. This software may be used and
14 distributed according to the terms of the GNU General Public License, 14 distributed according to the terms of the GNU General Public License,
15 incorporated herein by reference. 15 incorporated herein by reference.
16 Donald Becker may be reached at becker@scyld.com 16 Donald Becker may be reached at becker@scyld.com
17 17
18 Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk> 18 Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
19 19
20======================================================================*/ 20======================================================================*/
@@ -69,31 +69,54 @@
69/* The top five bits written to EL3_CMD are a command, the lower 69/* The top five bits written to EL3_CMD are a command, the lower
70 11 bits are the parameter, if applicable. */ 70 11 bits are the parameter, if applicable. */
71enum c509cmd { 71enum c509cmd {
72 TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11, 72 TotalReset = 0<<11,
73 RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11, RxDiscard = 8<<11, 73 SelectWindow = 1<<11,
74 TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11, 74 StartCoax = 2<<11,
75 FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11, 75 RxDisable = 3<<11,
76 SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11, 76 RxEnable = 4<<11,
77 SetTxThreshold = 18<<11, SetTxStart = 19<<11, StatsEnable = 21<<11, 77 RxReset = 5<<11,
78 StatsDisable = 22<<11, StopCoax = 23<<11, 78 RxDiscard = 8<<11,
79 TxEnable = 9<<11,
80 TxDisable = 10<<11,
81 TxReset = 11<<11,
82 FakeIntr = 12<<11,
83 AckIntr = 13<<11,
84 SetIntrEnb = 14<<11,
85 SetStatusEnb = 15<<11,
86 SetRxFilter = 16<<11,
87 SetRxThreshold = 17<<11,
88 SetTxThreshold = 18<<11,
89 SetTxStart = 19<<11,
90 StatsEnable = 21<<11,
91 StatsDisable = 22<<11,
92 StopCoax = 23<<11
79}; 93};
80 94
81enum c509status { 95enum c509status {
82 IntLatch = 0x0001, AdapterFailure = 0x0002, TxComplete = 0x0004, 96 IntLatch = 0x0001,
83 TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020, 97 AdapterFailure = 0x0002,
84 IntReq = 0x0040, StatsFull = 0x0080, CmdBusy = 0x1000 98 TxComplete = 0x0004,
99 TxAvailable = 0x0008,
100 RxComplete = 0x0010,
101 RxEarly = 0x0020,
102 IntReq = 0x0040,
103 StatsFull = 0x0080,
104 CmdBusy = 0x1000
85}; 105};
86 106
87/* The SetRxFilter command accepts the following classes: */ 107/* The SetRxFilter command accepts the following classes: */
88enum RxFilter { 108enum RxFilter {
89 RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 109 RxStation = 1,
110 RxMulticast = 2,
111 RxBroadcast = 4,
112 RxProm = 8
90}; 113};
91 114
92/* Register window 1 offsets, the window used in normal operation. */ 115/* Register window 1 offsets, the window used in normal operation. */
93#define TX_FIFO 0x00 116#define TX_FIFO 0x00
94#define RX_FIFO 0x00 117#define RX_FIFO 0x00
95#define RX_STATUS 0x08 118#define RX_STATUS 0x08
96#define TX_STATUS 0x0B 119#define TX_STATUS 0x0B
97#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */ 120#define TX_FREE 0x0C /* Remaining free bytes in Tx buffer. */
98 121
99#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */ 122#define WN0_IRQ 0x08 /* Window 0: Set IRQ line in bits 12-15. */
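
Per the comment at the top of this block, writes to EL3_CMD carry a
5-bit opcode in bits 15..11 and an 11-bit parameter below it; the
reformatted enumerators are those opcodes pre-shifted by 11 so they can
simply be OR-ed with an argument. For example (ioaddr being
dev->base_addr, as in the driver below):

outw(SelectWindow | 4, ioaddr + EL3_CMD);	/* command 1, parameter 4 */
outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);	/* command 13, status bits to ack */
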
@@ -106,13 +129,13 @@ enum RxFilter {
106 129
107struct el3_private { 130struct el3_private {
108 struct pcmcia_device *p_dev; 131 struct pcmcia_device *p_dev;
109 dev_node_t node; 132 dev_node_t node;
110 /* For transceiver monitoring */ 133 /* For transceiver monitoring */
111 struct timer_list media; 134 struct timer_list media;
112 u16 media_status; 135 u16 media_status;
113 u16 fast_poll; 136 u16 fast_poll;
114 unsigned long last_irq; 137 unsigned long last_irq;
115 spinlock_t lock; 138 spinlock_t lock;
116}; 139};
117 140
118static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" }; 141static const char *if_names[] = { "auto", "10baseT", "10base2", "AUI" };
@@ -164,15 +187,15 @@ static void tc589_detach(struct pcmcia_device *p_dev);
164======================================================================*/ 187======================================================================*/
165 188
166static const struct net_device_ops el3_netdev_ops = { 189static const struct net_device_ops el3_netdev_ops = {
167 .ndo_open = el3_open, 190 .ndo_open = el3_open,
168 .ndo_stop = el3_close, 191 .ndo_stop = el3_close,
169 .ndo_start_xmit = el3_start_xmit, 192 .ndo_start_xmit = el3_start_xmit,
170 .ndo_tx_timeout = el3_tx_timeout, 193 .ndo_tx_timeout = el3_tx_timeout,
171 .ndo_set_config = el3_config, 194 .ndo_set_config = el3_config,
172 .ndo_get_stats = el3_get_stats, 195 .ndo_get_stats = el3_get_stats,
173 .ndo_set_multicast_list = set_multicast_list, 196 .ndo_set_multicast_list = set_multicast_list,
174 .ndo_change_mtu = eth_change_mtu, 197 .ndo_change_mtu = eth_change_mtu,
175 .ndo_set_mac_address = eth_mac_addr, 198 .ndo_set_mac_address = eth_mac_addr,
176 .ndo_validate_addr = eth_validate_addr, 199 .ndo_validate_addr = eth_validate_addr,
177}; 200};
178 201
@@ -236,7 +259,7 @@ static void tc589_detach(struct pcmcia_device *link)
236 tc589_config() is scheduled to run after a CARD_INSERTION event 259 tc589_config() is scheduled to run after a CARD_INSERTION event
237 is received, to configure the PCMCIA socket, and to make the 260 is received, to configure the PCMCIA socket, and to make the
238 ethernet device available to the system. 261 ethernet device available to the system.
239 262
240======================================================================*/ 263======================================================================*/
241 264
242static int tc589_config(struct pcmcia_device *link) 265static int tc589_config(struct pcmcia_device *link)
@@ -249,7 +272,7 @@ static int tc589_config(struct pcmcia_device *link)
249 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"}; 272 char *ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
250 u8 *buf; 273 u8 *buf;
251 size_t len; 274 size_t len;
252 275
253 dev_dbg(&link->dev, "3c589_config\n"); 276 dev_dbg(&link->dev, "3c589_config\n");
254 277
255 phys_addr = (__be16 *)dev->dev_addr; 278 phys_addr = (__be16 *)dev->dev_addr;
@@ -278,7 +301,7 @@ static int tc589_config(struct pcmcia_device *link)
278 ret = pcmcia_request_configuration(link, &link->conf); 301 ret = pcmcia_request_configuration(link, &link->conf);
279 if (ret) 302 if (ret)
280 goto failed; 303 goto failed;
281 304
282 dev->irq = link->irq.AssignedIRQ; 305 dev->irq = link->irq.AssignedIRQ;
283 dev->base_addr = link->io.BasePort1; 306 dev->base_addr = link->io.BasePort1;
284 ioaddr = dev->base_addr; 307 ioaddr = dev->base_addr;
@@ -312,7 +335,7 @@ static int tc589_config(struct pcmcia_device *link)
312 dev->if_port = if_port; 335 dev->if_port = if_port;
313 else 336 else
314 printk(KERN_ERR "3c589_cs: invalid if_port requested\n"); 337 printk(KERN_ERR "3c589_cs: invalid if_port requested\n");
315 338
316 link->dev_node = &lp->node; 339 link->dev_node = &lp->node;
317 SET_NETDEV_DEV(dev, &link->dev); 340 SET_NETDEV_DEV(dev, &link->dev);
318 341
@@ -324,13 +347,12 @@ static int tc589_config(struct pcmcia_device *link)
324 347
325 strcpy(lp->node.dev_name, dev->name); 348 strcpy(lp->node.dev_name, dev->name);
326 349
327 printk(KERN_INFO "%s: 3Com 3c%s, io %#3lx, irq %d, " 350 netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
328 "hw_addr %pM\n", 351 (multi ? "562" : "589"), dev->base_addr, dev->irq,
329 dev->name, (multi ? "562" : "589"), dev->base_addr, dev->irq, 352 dev->dev_addr);
330 dev->dev_addr); 353 netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
331 printk(KERN_INFO " %dK FIFO split %s Rx:Tx, %s xcvr\n", 354 (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
332 (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3], 355 if_names[dev->if_port]);
333 if_names[dev->if_port]);
334 return 0; 356 return 0;
335 357
336failed: 358failed:
@@ -343,7 +365,7 @@ failed:
343 After a card is removed, tc589_release() will unregister the net 365 After a card is removed, tc589_release() will unregister the net
344 device, and release the PCMCIA configuration. If the device is 366 device, and release the PCMCIA configuration. If the device is
345 still open, this will be postponed until it is closed. 367 still open, this will be postponed until it is closed.
346 368
347======================================================================*/ 369======================================================================*/
348 370
349static void tc589_release(struct pcmcia_device *link) 371static void tc589_release(struct pcmcia_device *link)
@@ -365,7 +387,7 @@ static int tc589_resume(struct pcmcia_device *link)
365{ 387{
366 struct net_device *dev = link->priv; 388 struct net_device *dev = link->priv;
367 389
368 if (link->open) { 390 if (link->open) {
369 tc589_reset(dev); 391 tc589_reset(dev);
370 netif_device_attach(dev); 392 netif_device_attach(dev);
371 } 393 }
@@ -385,8 +407,7 @@ static void tc589_wait_for_completion(struct net_device *dev, int cmd)
385 while (--i > 0) 407 while (--i > 0)
386 if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break; 408 if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
387 if (i == 0) 409 if (i == 0)
388 printk(KERN_WARNING "%s: command 0x%04x did not complete!\n", 410 netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
389 dev->name, cmd);
390} 411}
391 412
392/* 413/*
@@ -412,7 +433,7 @@ static void tc589_set_xcvr(struct net_device *dev, int if_port)
412{ 433{
413 struct el3_private *lp = netdev_priv(dev); 434 struct el3_private *lp = netdev_priv(dev);
414 unsigned int ioaddr = dev->base_addr; 435 unsigned int ioaddr = dev->base_addr;
415 436
416 EL3WINDOW(0); 437 EL3WINDOW(0);
417 switch (if_port) { 438 switch (if_port) {
418 case 0: case 1: outw(0, ioaddr + 6); break; 439 case 0: case 1: outw(0, ioaddr + 6); break;
@@ -435,14 +456,13 @@ static void dump_status(struct net_device *dev)
435{ 456{
436 unsigned int ioaddr = dev->base_addr; 457 unsigned int ioaddr = dev->base_addr;
437 EL3WINDOW(1); 458 EL3WINDOW(1);
438 printk(KERN_INFO " irq status %04x, rx status %04x, tx status " 459 netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
439 "%02x tx free %04x\n", inw(ioaddr+EL3_STATUS), 460 inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
440 inw(ioaddr+RX_STATUS), inb(ioaddr+TX_STATUS), 461 inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
441 inw(ioaddr+TX_FREE));
442 EL3WINDOW(4); 462 EL3WINDOW(4);
443 printk(KERN_INFO " diagnostics: fifo %04x net %04x ethernet %04x" 463 netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
444 " media %04x\n", inw(ioaddr+0x04), inw(ioaddr+0x06), 464 inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
445 inw(ioaddr+0x08), inw(ioaddr+0x0a)); 465 inw(ioaddr+0x0a));
446 EL3WINDOW(1); 466 EL3WINDOW(1);
447} 467}
448 468
@@ -451,18 +471,18 @@ static void tc589_reset(struct net_device *dev)
451{ 471{
452 unsigned int ioaddr = dev->base_addr; 472 unsigned int ioaddr = dev->base_addr;
453 int i; 473 int i;
454 474
455 EL3WINDOW(0); 475 EL3WINDOW(0);
456 outw(0x0001, ioaddr + 4); /* Activate board. */ 476 outw(0x0001, ioaddr + 4); /* Activate board. */
457 outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */ 477 outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
458 478
459 /* Set the station address in window 2. */ 479 /* Set the station address in window 2. */
460 EL3WINDOW(2); 480 EL3WINDOW(2);
461 for (i = 0; i < 6; i++) 481 for (i = 0; i < 6; i++)
462 outb(dev->dev_addr[i], ioaddr + i); 482 outb(dev->dev_addr[i], ioaddr + i);
463 483
464 tc589_set_xcvr(dev, dev->if_port); 484 tc589_set_xcvr(dev, dev->if_port);
465 485
466 /* Switch to the stats window, and clear all stats by reading. */ 486 /* Switch to the stats window, and clear all stats by reading. */
467 outw(StatsDisable, ioaddr + EL3_CMD); 487 outw(StatsDisable, ioaddr + EL3_CMD);
468 EL3WINDOW(6); 488 EL3WINDOW(6);
@@ -470,7 +490,7 @@ static void tc589_reset(struct net_device *dev)
470 inb(ioaddr+i); 490 inb(ioaddr+i);
471 inw(ioaddr + 10); 491 inw(ioaddr + 10);
472 inw(ioaddr + 12); 492 inw(ioaddr + 12);
473 493
474 /* Switch to register set 1 for normal use. */ 494 /* Switch to register set 1 for normal use. */
475 EL3WINDOW(1); 495 EL3WINDOW(1);
476 496
@@ -504,8 +524,7 @@ static int el3_config(struct net_device *dev, struct ifmap *map)
504 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) { 524 if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
505 if (map->port <= 3) { 525 if (map->port <= 3) {
506 dev->if_port = map->port; 526 dev->if_port = map->port;
507 printk(KERN_INFO "%s: switched to %s port\n", 527 netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
508 dev->name, if_names[dev->if_port]);
509 tc589_set_xcvr(dev, dev->if_port); 528 tc589_set_xcvr(dev, dev->if_port);
510 } else 529 } else
511 return -EINVAL; 530 return -EINVAL;
@@ -517,13 +536,13 @@ static int el3_open(struct net_device *dev)
517{ 536{
518 struct el3_private *lp = netdev_priv(dev); 537 struct el3_private *lp = netdev_priv(dev);
519 struct pcmcia_device *link = lp->p_dev; 538 struct pcmcia_device *link = lp->p_dev;
520 539
521 if (!pcmcia_dev_present(link)) 540 if (!pcmcia_dev_present(link))
522 return -ENODEV; 541 return -ENODEV;
523 542
524 link->open++; 543 link->open++;
525 netif_start_queue(dev); 544 netif_start_queue(dev);
526 545
527 tc589_reset(dev); 546 tc589_reset(dev);
528 init_timer(&lp->media); 547 init_timer(&lp->media);
529 lp->media.function = &media_check; 548 lp->media.function = &media_check;
@@ -533,15 +552,15 @@ static int el3_open(struct net_device *dev)
533 552
534 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", 553 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
535 dev->name, inw(dev->base_addr + EL3_STATUS)); 554 dev->name, inw(dev->base_addr + EL3_STATUS));
536 555
537 return 0; 556 return 0;
538} 557}
539 558
540static void el3_tx_timeout(struct net_device *dev) 559static void el3_tx_timeout(struct net_device *dev)
541{ 560{
542 unsigned int ioaddr = dev->base_addr; 561 unsigned int ioaddr = dev->base_addr;
543 562
544 printk(KERN_WARNING "%s: Transmit timed out!\n", dev->name); 563 netdev_warn(dev, "Transmit timed out!\n");
545 dump_status(dev); 564 dump_status(dev);
546 dev->stats.tx_errors++; 565 dev->stats.tx_errors++;
547 dev->trans_start = jiffies; 566 dev->trans_start = jiffies;
@@ -555,19 +574,18 @@ static void pop_tx_status(struct net_device *dev)
555{ 574{
556 unsigned int ioaddr = dev->base_addr; 575 unsigned int ioaddr = dev->base_addr;
557 int i; 576 int i;
558 577
559 /* Clear the Tx status stack. */ 578 /* Clear the Tx status stack. */
560 for (i = 32; i > 0; i--) { 579 for (i = 32; i > 0; i--) {
561 u_char tx_status = inb(ioaddr + TX_STATUS); 580 u_char tx_status = inb(ioaddr + TX_STATUS);
562 if (!(tx_status & 0x84)) break; 581 if (!(tx_status & 0x84)) break;
563 /* reset transmitter on jabber error or underrun */ 582 /* reset transmitter on jabber error or underrun */
564 if (tx_status & 0x30) 583 if (tx_status & 0x30)
565 tc589_wait_for_completion(dev, TxReset); 584 tc589_wait_for_completion(dev, TxReset);
566 if (tx_status & 0x38) { 585 if (tx_status & 0x38) {
567 pr_debug("%s: transmit error: status 0x%02x\n", 586 netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
568 dev->name, tx_status); 587 outw(TxEnable, ioaddr + EL3_CMD);
569 outw(TxEnable, ioaddr + EL3_CMD); 588 dev->stats.tx_aborted_errors++;
570 dev->stats.tx_aborted_errors++;
571 } 589 }
572 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ 590 outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
573 } 591 }
@@ -580,11 +598,10 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
580 struct el3_private *priv = netdev_priv(dev); 598 struct el3_private *priv = netdev_priv(dev);
581 unsigned long flags; 599 unsigned long flags;
582 600
583 pr_debug("%s: el3_start_xmit(length = %ld) called, " 601 netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
584 "status %4.4x.\n", dev->name, (long)skb->len, 602 (long)skb->len, inw(ioaddr + EL3_STATUS));
585 inw(ioaddr + EL3_STATUS));
586 603
587 spin_lock_irqsave(&priv->lock, flags); 604 spin_lock_irqsave(&priv->lock, flags);
588 605
589 dev->stats.tx_bytes += skb->len; 606 dev->stats.tx_bytes += skb->len;
590 607
@@ -602,9 +619,9 @@ static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
602 } 619 }
603 620
604 pop_tx_status(dev); 621 pop_tx_status(dev);
605 spin_unlock_irqrestore(&priv->lock, flags); 622 spin_unlock_irqrestore(&priv->lock, flags);
606 dev_kfree_skb(skb); 623 dev_kfree_skb(skb);
607 624
608 return NETDEV_TX_OK; 625 return NETDEV_TX_OK;
609} 626}
610 627
@@ -616,37 +633,32 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
616 unsigned int ioaddr; 633 unsigned int ioaddr;
617 __u16 status; 634 __u16 status;
618 int i = 0, handled = 1; 635 int i = 0, handled = 1;
619 636
620 if (!netif_device_present(dev)) 637 if (!netif_device_present(dev))
621 return IRQ_NONE; 638 return IRQ_NONE;
622 639
623 ioaddr = dev->base_addr; 640 ioaddr = dev->base_addr;
624 641
625 pr_debug("%s: interrupt, status %4.4x.\n", 642 netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
626 dev->name, inw(ioaddr + EL3_STATUS));
627 643
628 spin_lock(&lp->lock); 644 spin_lock(&lp->lock);
629 while ((status = inw(ioaddr + EL3_STATUS)) & 645 while ((status = inw(ioaddr + EL3_STATUS)) &
630 (IntLatch | RxComplete | StatsFull)) { 646 (IntLatch | RxComplete | StatsFull)) {
631 if ((status & 0xe000) != 0x2000) { 647 if ((status & 0xe000) != 0x2000) {
632 pr_debug("%s: interrupt from dead card\n", dev->name); 648 netdev_dbg(dev, "interrupt from dead card\n");
633 handled = 0; 649 handled = 0;
634 break; 650 break;
635 } 651 }
636
637 if (status & RxComplete) 652 if (status & RxComplete)
638 el3_rx(dev); 653 el3_rx(dev);
639
640 if (status & TxAvailable) { 654 if (status & TxAvailable) {
641 pr_debug(" TX room bit was handled.\n"); 655 netdev_dbg(dev, " TX room bit was handled.\n");
642 /* There's room in the FIFO for a full-sized packet. */ 656 /* There's room in the FIFO for a full-sized packet. */
643 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD); 657 outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
644 netif_wake_queue(dev); 658 netif_wake_queue(dev);
645 } 659 }
646
647 if (status & TxComplete) 660 if (status & TxComplete)
648 pop_tx_status(dev); 661 pop_tx_status(dev);
649
650 if (status & (AdapterFailure | RxEarly | StatsFull)) { 662 if (status & (AdapterFailure | RxEarly | StatsFull)) {
651 /* Handle all uncommon interrupts. */ 663 /* Handle all uncommon interrupts. */
652 if (status & StatsFull) /* Empty statistics. */ 664 if (status & StatsFull) /* Empty statistics. */
@@ -660,8 +672,8 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
660 EL3WINDOW(4); 672 EL3WINDOW(4);
661 fifo_diag = inw(ioaddr + 4); 673 fifo_diag = inw(ioaddr + 4);
662 EL3WINDOW(1); 674 EL3WINDOW(1);
663 printk(KERN_WARNING "%s: adapter failure, FIFO diagnostic" 675 netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
664 " register %04x.\n", dev->name, fifo_diag); 676 fifo_diag);
665 if (fifo_diag & 0x0400) { 677 if (fifo_diag & 0x0400) {
666 /* Tx overrun */ 678 /* Tx overrun */
667 tc589_wait_for_completion(dev, TxReset); 679 tc589_wait_for_completion(dev, TxReset);
@@ -676,22 +688,20 @@ static irqreturn_t el3_interrupt(int irq, void *dev_id)
676 outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD); 688 outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
677 } 689 }
678 } 690 }
679
680 if (++i > 10) { 691 if (++i > 10) {
681 printk(KERN_ERR "%s: infinite loop in interrupt, " 692 netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
682 "status %4.4x.\n", dev->name, status); 693 status);
683 /* Clear all interrupts */ 694 /* Clear all interrupts */
684 outw(AckIntr | 0xFF, ioaddr + EL3_CMD); 695 outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
685 break; 696 break;
686 } 697 }
687 /* Acknowledge the IRQ. */ 698 /* Acknowledge the IRQ. */
688 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD); 699 outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
689 } 700 }
690
691 lp->last_irq = jiffies; 701 lp->last_irq = jiffies;
692 spin_unlock(&lp->lock); 702 spin_unlock(&lp->lock);
693 pr_debug("%s: exiting interrupt, status %4.4x.\n", 703 netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
694 dev->name, inw(ioaddr + EL3_STATUS)); 704 inw(ioaddr + EL3_STATUS));
695 return IRQ_RETVAL(handled); 705 return IRQ_RETVAL(handled);
696} 706}
697 707
@@ -710,7 +720,7 @@ static void media_check(unsigned long arg)
710 if ((inw(ioaddr + EL3_STATUS) & IntLatch) && 720 if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
711 (inb(ioaddr + EL3_TIMER) == 0xff)) { 721 (inb(ioaddr + EL3_TIMER) == 0xff)) {
712 if (!lp->fast_poll) 722 if (!lp->fast_poll)
713 printk(KERN_WARNING "%s: interrupt(s) dropped!\n", dev->name); 723 netdev_warn(dev, "interrupt(s) dropped!\n");
714 724
715 local_irq_save(flags); 725 local_irq_save(flags);
716 el3_interrupt(dev->irq, dev); 726 el3_interrupt(dev->irq, dev);
@@ -727,7 +737,7 @@ static void media_check(unsigned long arg)
727 737
728 /* lp->lock guards the EL3 window. Window should always be 1 except 738 /* lp->lock guards the EL3 window. Window should always be 1 except
729 when the lock is held */ 739 when the lock is held */
730 spin_lock_irqsave(&lp->lock, flags); 740 spin_lock_irqsave(&lp->lock, flags);
731 EL3WINDOW(4); 741 EL3WINDOW(4);
732 media = inw(ioaddr+WN4_MEDIA) & 0xc810; 742 media = inw(ioaddr+WN4_MEDIA) & 0xc810;
733 743
@@ -747,32 +757,30 @@ static void media_check(unsigned long arg)
747 if (media != lp->media_status) { 757 if (media != lp->media_status) {
748 if ((media & lp->media_status & 0x8000) && 758 if ((media & lp->media_status & 0x8000) &&
749 ((lp->media_status ^ media) & 0x0800)) 759 ((lp->media_status ^ media) & 0x0800))
750 printk(KERN_INFO "%s: %s link beat\n", dev->name, 760 netdev_info(dev, "%s link beat\n",
751 (lp->media_status & 0x0800 ? "lost" : "found")); 761 (lp->media_status & 0x0800 ? "lost" : "found"));
752 else if ((media & lp->media_status & 0x4000) && 762 else if ((media & lp->media_status & 0x4000) &&
753 ((lp->media_status ^ media) & 0x0010)) 763 ((lp->media_status ^ media) & 0x0010))
754 printk(KERN_INFO "%s: coax cable %s\n", dev->name, 764 netdev_info(dev, "coax cable %s\n",
755 (lp->media_status & 0x0010 ? "ok" : "problem")); 765 (lp->media_status & 0x0010 ? "ok" : "problem"));
756 if (dev->if_port == 0) { 766 if (dev->if_port == 0) {
757 if (media & 0x8000) { 767 if (media & 0x8000) {
758 if (media & 0x0800) 768 if (media & 0x0800)
759 printk(KERN_INFO "%s: flipped to 10baseT\n", 769 netdev_info(dev, "flipped to 10baseT\n");
760 dev->name);
761 else 770 else
762 tc589_set_xcvr(dev, 2); 771 tc589_set_xcvr(dev, 2);
763 } else if (media & 0x4000) { 772 } else if (media & 0x4000) {
764 if (media & 0x0010) 773 if (media & 0x0010)
765 tc589_set_xcvr(dev, 1); 774 tc589_set_xcvr(dev, 1);
766 else 775 else
767 printk(KERN_INFO "%s: flipped to 10base2\n", 776 netdev_info(dev, "flipped to 10base2\n");
768 dev->name);
769 } 777 }
770 } 778 }
771 lp->media_status = media; 779 lp->media_status = media;
772 } 780 }
773 781
774 EL3WINDOW(1); 782 EL3WINDOW(1);
775 spin_unlock_irqrestore(&lp->lock, flags); 783 spin_unlock_irqrestore(&lp->lock, flags);
776 784
777reschedule: 785reschedule:
778 lp->media.expires = jiffies + HZ; 786 lp->media.expires = jiffies + HZ;
@@ -786,7 +794,7 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
786 struct pcmcia_device *link = lp->p_dev; 794 struct pcmcia_device *link = lp->p_dev;
787 795
788 if (pcmcia_dev_present(link)) { 796 if (pcmcia_dev_present(link)) {
789 spin_lock_irqsave(&lp->lock, flags); 797 spin_lock_irqsave(&lp->lock, flags);
790 update_stats(dev); 798 update_stats(dev);
791 spin_unlock_irqrestore(&lp->lock, flags); 799 spin_unlock_irqrestore(&lp->lock, flags);
792 } 800 }
@@ -798,21 +806,21 @@ static struct net_device_stats *el3_get_stats(struct net_device *dev)
798 single-threaded if the device is active. This is expected to be a rare 806 single-threaded if the device is active. This is expected to be a rare
799 operation, and it's simpler for the rest of the driver to assume that 807 operation, and it's simpler for the rest of the driver to assume that
800 window 1 is always valid rather than use a special window-state variable. 808 window 1 is always valid rather than use a special window-state variable.
801 809
802 Caller must hold the lock for this 810 Caller must hold the lock for this
803*/ 811*/
804static void update_stats(struct net_device *dev) 812static void update_stats(struct net_device *dev)
805{ 813{
806 unsigned int ioaddr = dev->base_addr; 814 unsigned int ioaddr = dev->base_addr;
807 815
808 pr_debug("%s: updating the statistics.\n", dev->name); 816 netdev_dbg(dev, "updating the statistics.\n");
809 /* Turn off statistics updates while reading. */ 817 /* Turn off statistics updates while reading. */
810 outw(StatsDisable, ioaddr + EL3_CMD); 818 outw(StatsDisable, ioaddr + EL3_CMD);
811 /* Switch to the stats window, and read everything. */ 819 /* Switch to the stats window, and read everything. */
812 EL3WINDOW(6); 820 EL3WINDOW(6);
813 dev->stats.tx_carrier_errors += inb(ioaddr + 0); 821 dev->stats.tx_carrier_errors += inb(ioaddr + 0);
814 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); 822 dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
815 /* Multiple collisions. */ inb(ioaddr + 2); 823 /* Multiple collisions. */ inb(ioaddr + 2);
816 dev->stats.collisions += inb(ioaddr + 3); 824 dev->stats.collisions += inb(ioaddr + 3);
817 dev->stats.tx_window_errors += inb(ioaddr + 4); 825 dev->stats.tx_window_errors += inb(ioaddr + 4);
818 dev->stats.rx_fifo_errors += inb(ioaddr + 5); 826 dev->stats.rx_fifo_errors += inb(ioaddr + 5);
@@ -821,7 +829,7 @@ static void update_stats(struct net_device *dev)
821 /* Tx deferrals */ inb(ioaddr + 8); 829 /* Tx deferrals */ inb(ioaddr + 8);
822 /* Rx octets */ inw(ioaddr + 10); 830 /* Rx octets */ inw(ioaddr + 10);
823 /* Tx octets */ inw(ioaddr + 12); 831 /* Tx octets */ inw(ioaddr + 12);
824 832
825 /* Back to window 1, and turn statistics back on. */ 833 /* Back to window 1, and turn statistics back on. */
826 EL3WINDOW(1); 834 EL3WINDOW(1);
827 outw(StatsEnable, ioaddr + EL3_CMD); 835 outw(StatsEnable, ioaddr + EL3_CMD);
@@ -832,9 +840,9 @@ static int el3_rx(struct net_device *dev)
832 unsigned int ioaddr = dev->base_addr; 840 unsigned int ioaddr = dev->base_addr;
833 int worklimit = 32; 841 int worklimit = 32;
834 short rx_status; 842 short rx_status;
835 843
836 pr_debug("%s: in rx_packet(), status %4.4x, rx_status %4.4x.\n", 844 netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
837 dev->name, inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS)); 845 inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
838 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) && 846 while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
839 worklimit > 0) { 847 worklimit > 0) {
840 worklimit--; 848 worklimit--;
@@ -852,11 +860,11 @@ static int el3_rx(struct net_device *dev)
852 } else { 860 } else {
853 short pkt_len = rx_status & 0x7ff; 861 short pkt_len = rx_status & 0x7ff;
854 struct sk_buff *skb; 862 struct sk_buff *skb;
855 863
856 skb = dev_alloc_skb(pkt_len+5); 864 skb = dev_alloc_skb(pkt_len+5);
857 865
858 pr_debug(" Receiving packet size %d status %4.4x.\n", 866 netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
859 pkt_len, rx_status); 867 pkt_len, rx_status);
860 if (skb != NULL) { 868 if (skb != NULL) {
861 skb_reserve(skb, 2); 869 skb_reserve(skb, 2);
862 insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len), 870 insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
@@ -866,8 +874,8 @@ static int el3_rx(struct net_device *dev)
866 dev->stats.rx_packets++; 874 dev->stats.rx_packets++;
867 dev->stats.rx_bytes += pkt_len; 875 dev->stats.rx_bytes += pkt_len;
868 } else { 876 } else {
869 pr_debug("%s: couldn't allocate a sk_buff of" 877 netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
870 " size %d.\n", dev->name, pkt_len); 878 pkt_len);
871 dev->stats.rx_dropped++; 879 dev->stats.rx_dropped++;
872 } 880 }
873 } 881 }
@@ -875,7 +883,7 @@ static int el3_rx(struct net_device *dev)
875 tc589_wait_for_completion(dev, RxDiscard); 883 tc589_wait_for_completion(dev, RxDiscard);
876 } 884 }
877 if (worklimit == 0) 885 if (worklimit == 0)
878 printk(KERN_WARNING "%s: too much work in el3_rx!\n", dev->name); 886 netdev_warn(dev, "too much work in el3_rx!\n");
879 return 0; 887 return 0;
880} 888}
881 889
@@ -906,17 +914,17 @@ static int el3_close(struct net_device *dev)
906 struct el3_private *lp = netdev_priv(dev); 914 struct el3_private *lp = netdev_priv(dev);
907 struct pcmcia_device *link = lp->p_dev; 915 struct pcmcia_device *link = lp->p_dev;
908 unsigned int ioaddr = dev->base_addr; 916 unsigned int ioaddr = dev->base_addr;
909 917
910 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name); 918 dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
911 919
912 if (pcmcia_dev_present(link)) { 920 if (pcmcia_dev_present(link)) {
913 /* Turn off statistics ASAP. We update dev->stats below. */ 921 /* Turn off statistics ASAP. We update dev->stats below. */
914 outw(StatsDisable, ioaddr + EL3_CMD); 922 outw(StatsDisable, ioaddr + EL3_CMD);
915 923
916 /* Disable the receiver and transmitter. */ 924 /* Disable the receiver and transmitter. */
917 outw(RxDisable, ioaddr + EL3_CMD); 925 outw(RxDisable, ioaddr + EL3_CMD);
918 outw(TxDisable, ioaddr + EL3_CMD); 926 outw(TxDisable, ioaddr + EL3_CMD);
919 927
920 if (dev->if_port == 2) 928 if (dev->if_port == 2)
921 /* Turn off thinnet power. Green! */ 929 /* Turn off thinnet power. Green! */
922 outw(StopCoax, ioaddr + EL3_CMD); 930 outw(StopCoax, ioaddr + EL3_CMD);
@@ -925,12 +933,12 @@ static int el3_close(struct net_device *dev)
925 EL3WINDOW(4); 933 EL3WINDOW(4);
926 outw(0, ioaddr + WN4_MEDIA); 934 outw(0, ioaddr + WN4_MEDIA);
927 } 935 }
928 936
929 /* Switching back to window 0 disables the IRQ. */ 937 /* Switching back to window 0 disables the IRQ. */
930 EL3WINDOW(0); 938 EL3WINDOW(0);
931 /* But we explicitly zero the IRQ line select anyway. */ 939 /* But we explicitly zero the IRQ line select anyway. */
932 outw(0x0f00, ioaddr + WN0_IRQ); 940 outw(0x0f00, ioaddr + WN0_IRQ);
933 941
934 /* Check if the card still exists */ 942 /* Check if the card still exists */
935 if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000) 943 if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
936 update_stats(dev); 944 update_stats(dev);
@@ -939,7 +947,7 @@ static int el3_close(struct net_device *dev)
939 link->open--; 947 link->open--;
940 netif_stop_queue(dev); 948 netif_stop_queue(dev);
941 del_timer_sync(&lp->media); 949 del_timer_sync(&lp->media);
942 950
943 return 0; 951 return 0;
944} 952}
945 953
@@ -961,7 +969,7 @@ static struct pcmcia_driver tc589_driver = {
961 }, 969 },
962 .probe = tc589_probe, 970 .probe = tc589_probe,
963 .remove = tc589_detach, 971 .remove = tc589_detach,
964 .id_table = tc589_ids, 972 .id_table = tc589_ids,
965 .suspend = tc589_suspend, 973 .suspend = tc589_suspend,
966 .resume = tc589_resume, 974 .resume = tc589_resume,
967}; 975};
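
Most of the 3c589_cs changes above are a logging conversion: open-coded
printk(KERN_* "%s: ...", dev->name, ...) calls become the netdev_warn(),
netdev_info(), netdev_err() and netdev_dbg() helpers, which prefix the
driver and interface name automatically. The first hunk of that kind,
side by side:

/* before: hand-rolled prefix */
printk(KERN_WARNING "%s: command 0x%04x did not complete!\n",
       dev->name, cmd);

/* after: the helper supplies a consistent "<driver> <ifname>: " prefix */
netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
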
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 9f3d593f14ed..70fc9591821f 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -1622,11 +1622,11 @@ static struct net_device_stats *get_stats(struct net_device *dev)
1622 1622
1623static inline void make_mc_bits(u8 *bits, struct net_device *dev) 1623static inline void make_mc_bits(u8 *bits, struct net_device *dev)
1624{ 1624{
1625 struct dev_mc_list *dmi; 1625 struct netdev_hw_addr *ha;
1626 u32 crc; 1626 u32 crc;
1627 1627
1628 netdev_for_each_mc_addr(dmi, dev) { 1628 netdev_for_each_mc_addr(ha, dev) {
1629 crc = ether_crc(ETH_ALEN, dmi->dmi_addr); 1629 crc = ether_crc(ETH_ALEN, ha->addr);
1630 /* 1630 /*
1631 * The 8390 uses the 6 most significant bits of the 1631 * The 8390 uses the 6 most significant bits of the
1632 * CRC to index the multicast table. 1632 * CRC to index the multicast table.
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index b9dc80b9d04a..6734f7d6da98 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -1196,11 +1196,11 @@ static void set_rx_mode(struct net_device *dev)
1196 memset(mc_filter, 0x00, sizeof(mc_filter)); 1196 memset(mc_filter, 0x00, sizeof(mc_filter));
1197 outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */ 1197 outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
1198 } else { 1198 } else {
1199 struct dev_mc_list *mclist; 1199 struct netdev_hw_addr *ha;
1200 1200
1201 memset(mc_filter, 0, sizeof(mc_filter)); 1201 memset(mc_filter, 0, sizeof(mc_filter));
1202 netdev_for_each_mc_addr(mclist, dev) { 1202 netdev_for_each_mc_addr(ha, dev) {
1203 unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26; 1203 unsigned int bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
1204 mc_filter[bit >> 3] |= (1 << (bit & 7)); 1204 mc_filter[bit >> 3] |= (1 << (bit & 7));
1205 } 1205 }
1206 outb(2, ioaddr + RX_MODE); /* Use normal mode. */ 1206 outb(2, ioaddr + RX_MODE); /* Use normal mode. */
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index c717b143f11a..c516c1996354 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1475,7 +1475,7 @@ static void set_multicast_list(struct net_device *dev)
1475{ 1475{
1476 mace_private *lp = netdev_priv(dev); 1476 mace_private *lp = netdev_priv(dev);
1477 int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */ 1477 int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
1478 struct dev_mc_list *dmi; 1478 struct netdev_hw_addr *ha;
1479 1479
1480#ifdef PCMCIA_DEBUG 1480#ifdef PCMCIA_DEBUG
1481 { 1481 {
@@ -1495,8 +1495,8 @@ static void set_multicast_list(struct net_device *dev)
1495 if (num_addrs > 0) { 1495 if (num_addrs > 0) {
1496 /* Calculate multicast logical address filter */ 1496 /* Calculate multicast logical address filter */
1497 memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN); 1497 memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
1498 netdev_for_each_mc_addr(dmi, dev) { 1498 netdev_for_each_mc_addr(ha, dev) {
1499 memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN); 1499 memcpy(adr, ha->addr, ETHER_ADDR_LEN);
1500 BuildLAF(lp->multicast_ladrf, adr); 1500 BuildLAF(lp->multicast_ladrf, adr);
1501 } 1501 }
1502 } 1502 }
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index fd9d6e34fda4..408f3d7b1545 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1621,10 +1621,10 @@ static void set_rx_mode(struct net_device *dev)
1621 rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti; 1621 rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
1622 else { 1622 else {
1623 if (!netdev_mc_empty(dev)) { 1623 if (!netdev_mc_empty(dev)) {
1624 struct dev_mc_list *mc_addr; 1624 struct netdev_hw_addr *ha;
1625 1625
1626 netdev_for_each_mc_addr(mc_addr, dev) { 1626 netdev_for_each_mc_addr(ha, dev) {
1627 u_int position = ether_crc(6, mc_addr->dmi_addr); 1627 u_int position = ether_crc(6, ha->addr);
1628 multicast_table[position >> 29] |= 1 << ((position >> 26) & 7); 1628 multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
1629 } 1629 }
1630 } 1630 }
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 4d1802e457be..656be931207a 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1398,7 +1398,7 @@ static void set_addresses(struct net_device *dev)
1398{ 1398{
1399 unsigned int ioaddr = dev->base_addr; 1399 unsigned int ioaddr = dev->base_addr;
1400 local_info_t *lp = netdev_priv(dev); 1400 local_info_t *lp = netdev_priv(dev);
1401 struct dev_mc_list *dmi; 1401 struct netdev_hw_addr *ha;
1402 struct set_address_info sa_info; 1402 struct set_address_info sa_info;
1403 int i; 1403 int i;
1404 1404
@@ -1413,10 +1413,10 @@ static void set_addresses(struct net_device *dev)
1413 1413
1414 set_address(&sa_info, dev->dev_addr); 1414 set_address(&sa_info, dev->dev_addr);
1415 i = 0; 1415 i = 0;
1416 netdev_for_each_mc_addr(dmi, dev) { 1416 netdev_for_each_mc_addr(ha, dev) {
1417 if (i++ == 9) 1417 if (i++ == 9)
1418 break; 1418 break;
1419 set_address(&sa_info, dmi->dmi_addr); 1419 set_address(&sa_info, ha->addr);
1420 } 1420 }
1421 while (i++ < 9) 1421 while (i++ < 9)
1422 set_address(&sa_info, dev->dev_addr); 1422 set_address(&sa_info, dev->dev_addr);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 084d78dd1637..a2254f749a9a 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -2590,7 +2590,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
2590 struct pcnet32_private *lp = netdev_priv(dev); 2590 struct pcnet32_private *lp = netdev_priv(dev);
2591 volatile struct pcnet32_init_block *ib = lp->init_block; 2591 volatile struct pcnet32_init_block *ib = lp->init_block;
2592 volatile __le16 *mcast_table = (__le16 *)ib->filter; 2592 volatile __le16 *mcast_table = (__le16 *)ib->filter;
2593 struct dev_mc_list *dmi; 2593 struct netdev_hw_addr *ha;
2594 unsigned long ioaddr = dev->base_addr; 2594 unsigned long ioaddr = dev->base_addr;
2595 char *addrs; 2595 char *addrs;
2596 int i; 2596 int i;
@@ -2611,8 +2611,8 @@ static void pcnet32_load_multicast(struct net_device *dev)
2611 ib->filter[1] = 0; 2611 ib->filter[1] = 0;
2612 2612
2613 /* Add addresses */ 2613 /* Add addresses */
2614 netdev_for_each_mc_addr(dmi, dev) { 2614 netdev_for_each_mc_addr(ha, dev) {
2615 addrs = dmi->dmi_addr; 2615 addrs = ha->addr;
2616 2616
2617 /* multicast address? */ 2617 /* multicast address? */
2618 if (!(*addrs & 1)) 2618 if (!(*addrs & 1))
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index 4fed95e8350e..c12815679837 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -130,3 +130,11 @@ static void __exit bcm63xx_phy_exit(void)
130 130
131module_init(bcm63xx_phy_init); 131module_init(bcm63xx_phy_init);
132module_exit(bcm63xx_phy_exit); 132module_exit(bcm63xx_phy_exit);
133
134static struct mdio_device_id bcm63xx_tbl[] = {
135 { 0x00406000, 0xfffffc00 },
136 { 0x002bdc00, 0xfffffc00 },
137 { }
138};
139
140MODULE_DEVICE_TABLE(mdio, bcm63xx_tbl);
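
These MODULE_DEVICE_TABLE(mdio, ...) additions, repeated for each PHY driver
below, depend on the mdio_device_id type introduced elsewhere in this series
(in <linux/mod_devicetable.h>); reproduced here for reference, an entry
matches a PHY when the ID bits selected by phy_id_mask equal those of phy_id:

	struct mdio_device_id {
		__u32 phy_id;
		__u32 phy_id_mask;
	};
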
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f482fc4f8cf1..cecdbbd549ec 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -908,3 +908,19 @@ static void __exit broadcom_exit(void)
908 908
909module_init(broadcom_init); 909module_init(broadcom_init);
910module_exit(broadcom_exit); 910module_exit(broadcom_exit);
911
912static struct mdio_device_id broadcom_tbl[] = {
913 { 0x00206070, 0xfffffff0 },
914 { 0x002060e0, 0xfffffff0 },
915 { 0x002060c0, 0xfffffff0 },
916 { 0x002060b0, 0xfffffff0 },
917 { 0x0143bca0, 0xfffffff0 },
918 { 0x0143bcb0, 0xfffffff0 },
919 { PHY_ID_BCM50610, 0xfffffff0 },
920 { PHY_ID_BCM50610M, 0xfffffff0 },
921 { PHY_ID_BCM57780, 0xfffffff0 },
922 { PHY_ID_BCMAC131, 0xfffffff0 },
923 { }
924};
925
926MODULE_DEVICE_TABLE(mdio, broadcom_tbl);
diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c
index 92282b31d94b..1a325d63756b 100644
--- a/drivers/net/phy/cicada.c
+++ b/drivers/net/phy/cicada.c
@@ -158,3 +158,11 @@ static void __exit cicada_exit(void)
158 158
159module_init(cicada_init); 159module_init(cicada_init);
160module_exit(cicada_exit); 160module_exit(cicada_exit);
161
162static struct mdio_device_id cicada_tbl[] = {
163 { 0x000fc410, 0x000ffff0 },
164 { 0x000fc440, 0x000fffc0 },
165 { }
166};
167
168MODULE_DEVICE_TABLE(mdio, cicada_tbl);
diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c
index c722e95853ff..29c17617a2ec 100644
--- a/drivers/net/phy/davicom.c
+++ b/drivers/net/phy/davicom.c
@@ -218,3 +218,12 @@ static void __exit davicom_exit(void)
218 218
219module_init(davicom_init); 219module_init(davicom_init);
220module_exit(davicom_exit); 220module_exit(davicom_exit);
221
222static struct mdio_device_id davicom_tbl[] = {
223 { 0x0181b880, 0x0ffffff0 },
224 { 0x0181b8a0, 0x0ffffff0 },
225 { 0x00181b80, 0x0ffffff0 },
226 { }
227};
228
229MODULE_DEVICE_TABLE(mdio, davicom_tbl);
diff --git a/drivers/net/phy/et1011c.c b/drivers/net/phy/et1011c.c
index 7712ebeba9bf..13995f52d6af 100644
--- a/drivers/net/phy/et1011c.c
+++ b/drivers/net/phy/et1011c.c
@@ -110,3 +110,10 @@ static void __exit et1011c_exit(void)
110 110
111module_init(et1011c_init); 111module_init(et1011c_init);
112module_exit(et1011c_exit); 112module_exit(et1011c_exit);
113
114static struct mdio_device_id et1011c_tbl[] = {
115 { 0x0282f014, 0xfffffff0 },
116 { }
117};
118
119MODULE_DEVICE_TABLE(mdio, et1011c_tbl);
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 904208b95d4b..439adafeacb1 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -131,3 +131,10 @@ static void __exit ip175c_exit(void)
131 131
132module_init(ip175c_init); 132module_init(ip175c_init);
133module_exit(ip175c_exit); 133module_exit(ip175c_exit);
134
135static struct mdio_device_id icplus_tbl[] = {
136 { 0x02430d80, 0x0ffffff0 },
137 { }
138};
139
140MODULE_DEVICE_TABLE(mdio, icplus_tbl);
diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c
index 057ecaacde6b..8ee929b796d8 100644
--- a/drivers/net/phy/lxt.c
+++ b/drivers/net/phy/lxt.c
@@ -173,3 +173,11 @@ static void __exit lxt_exit(void)
173 173
174module_init(lxt_init); 174module_init(lxt_init);
175module_exit(lxt_exit); 175module_exit(lxt_exit);
176
177static struct mdio_device_id lxt_tbl[] = {
178 { 0x78100000, 0xfffffff0 },
179 { 0x001378e0, 0xfffffff0 },
180 { }
181};
182
183MODULE_DEVICE_TABLE(mdio, lxt_tbl);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 64c7fbe0a8e7..78b74e83ce5d 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -648,3 +648,16 @@ static void __exit marvell_exit(void)
648 648
649module_init(marvell_init); 649module_init(marvell_init);
650module_exit(marvell_exit); 650module_exit(marvell_exit);
651
652static struct mdio_device_id marvell_tbl[] = {
653 { 0x01410c60, 0xfffffff0 },
654 { 0x01410c90, 0xfffffff0 },
655 { 0x01410cc0, 0xfffffff0 },
656 { 0x01410e10, 0xfffffff0 },
657 { 0x01410cb0, 0xfffffff0 },
658 { 0x01410cd0, 0xfffffff0 },
659 { 0x01410e30, 0xfffffff0 },
660 { }
661};
662
663MODULE_DEVICE_TABLE(mdio, marvell_tbl);
diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
index 19e70d7e27ab..65391891d8c4 100644
--- a/drivers/net/phy/mdio-bitbang.c
+++ b/drivers/net/phy/mdio-bitbang.c
@@ -22,8 +22,13 @@
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/delay.h> 23#include <linux/delay.h>
24 24
25#define MDIO_READ 1 25#define MDIO_READ 2
26#define MDIO_WRITE 0 26#define MDIO_WRITE 1
27
28#define MDIO_C45 (1<<15)
29#define MDIO_C45_ADDR (MDIO_C45 | 0)
30#define MDIO_C45_READ (MDIO_C45 | 3)
31#define MDIO_C45_WRITE (MDIO_C45 | 1)
27 32
28#define MDIO_SETUP_TIME 10 33#define MDIO_SETUP_TIME 10
29#define MDIO_HOLD_TIME 10 34#define MDIO_HOLD_TIME 10
@@ -89,7 +94,7 @@ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits)
89/* Utility to send the preamble, address, and 94/* Utility to send the preamble, address, and
90 * register (common to read and write). 95 * register (common to read and write).
91 */ 96 */
92static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg) 97static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg)
93{ 98{
94 const struct mdiobb_ops *ops = ctrl->ops; 99 const struct mdiobb_ops *ops = ctrl->ops;
95 int i; 100 int i;
@@ -108,23 +113,56 @@ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int read, u8 phy, u8 reg)
108 for (i = 0; i < 32; i++) 113 for (i = 0; i < 32; i++)
109 mdiobb_send_bit(ctrl, 1); 114 mdiobb_send_bit(ctrl, 1);
110 115
111 /* send the start bit (01) and the read opcode (10) or write (10) */ 116 /* send the start bit (01) and the read opcode (10) or write (01).
117 Clause 45 operation uses 00 for the start and 11, 01 for
118 read/write */
112 mdiobb_send_bit(ctrl, 0); 119 mdiobb_send_bit(ctrl, 0);
113 mdiobb_send_bit(ctrl, 1); 120 if (op & MDIO_C45)
114 mdiobb_send_bit(ctrl, read); 121 mdiobb_send_bit(ctrl, 0);
115 mdiobb_send_bit(ctrl, !read); 122 else
123 mdiobb_send_bit(ctrl, 1);
124 mdiobb_send_bit(ctrl, (op >> 1) & 1);
125 mdiobb_send_bit(ctrl, (op >> 0) & 1);
116 126
117 mdiobb_send_num(ctrl, phy, 5); 127 mdiobb_send_num(ctrl, phy, 5);
118 mdiobb_send_num(ctrl, reg, 5); 128 mdiobb_send_num(ctrl, reg, 5);
119} 129}
120 130
131/* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the
132 lower 16 bits of the 21 bit address. This transfer is done identically to
133 an MDIO_WRITE except for a different code. To enable clause 45 mode, OR
134 MII_ADDR_C45 into the address. Theoretically clause 45 and normal devices
135 can exist on the same bus. Normal devices should ignore the MDIO_ADDR
136 phase. */
137static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr)
138{
139 unsigned int dev_addr = (addr >> 16) & 0x1F;
140 unsigned int reg = addr & 0xFFFF;
141 mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr);
142
143 /* send the turnaround (10) */
144 mdiobb_send_bit(ctrl, 1);
145 mdiobb_send_bit(ctrl, 0);
146
147 mdiobb_send_num(ctrl, reg, 16);
148
149 ctrl->ops->set_mdio_dir(ctrl, 0);
150 mdiobb_get_bit(ctrl);
151
152 return dev_addr;
153}
121 154
122static int mdiobb_read(struct mii_bus *bus, int phy, int reg) 155static int mdiobb_read(struct mii_bus *bus, int phy, int reg)
123{ 156{
124 struct mdiobb_ctrl *ctrl = bus->priv; 157 struct mdiobb_ctrl *ctrl = bus->priv;
125 int ret, i; 158 int ret, i;
126 159
127 mdiobb_cmd(ctrl, MDIO_READ, phy, reg); 160 if (reg & MII_ADDR_C45) {
161 reg = mdiobb_cmd_addr(ctrl, phy, reg);
162 mdiobb_cmd(ctrl, MDIO_C45_READ, phy, reg);
163 } else
164 mdiobb_cmd(ctrl, MDIO_READ, phy, reg);
165
128 ctrl->ops->set_mdio_dir(ctrl, 0); 166 ctrl->ops->set_mdio_dir(ctrl, 0);
129 167
130 /* check the turnaround bit: the PHY should be driving it to zero */ 168 /* check the turnaround bit: the PHY should be driving it to zero */
@@ -147,7 +185,11 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val)
147{ 185{
148 struct mdiobb_ctrl *ctrl = bus->priv; 186 struct mdiobb_ctrl *ctrl = bus->priv;
149 187
150 mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg); 188 if (reg & MII_ADDR_C45) {
189 reg = mdiobb_cmd_addr(ctrl, phy, reg);
190 mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, reg);
191 } else
192 mdiobb_cmd(ctrl, MDIO_WRITE, phy, reg);
151 193
152 /* send the turnaround (10) */ 194 /* send the turnaround (10) */
153 mdiobb_send_bit(ctrl, 1); 195 mdiobb_send_bit(ctrl, 1);
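
With mdiobus_read()/mdiobus_write() widened to a 32-bit register argument in
the next hunk, a caller requests a clause 45 access by OR-ing MII_ADDR_C45
with the device address in bits 16-20 and the 16-bit register number below
it. A hedged usage sketch; MDIO_MMD_PMAPMD and MDIO_CTRL1 are the standard
<linux/mdio.h> constants and are used here purely for illustration:

	#include <linux/mdio.h>
	#include <linux/phy.h>

	/* Read PMA/PMD control register 0 over clause 45. */
	static int example_c45_read(struct mii_bus *bus, int phy_addr)
	{
		u32 regnum = MII_ADDR_C45 | (MDIO_MMD_PMAPMD << 16) | MDIO_CTRL1;

		return mdiobus_read(bus, phy_addr, regnum);
	}
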
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index e17b70291bbc..6a6b8199a0d6 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -208,7 +208,7 @@ EXPORT_SYMBOL(mdiobus_scan);
208 * because the bus read/write functions may wait for an interrupt 208 * because the bus read/write functions may wait for an interrupt
209 * to conclude the operation. 209 * to conclude the operation.
210 */ 210 */
211int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum) 211int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
212{ 212{
213 int retval; 213 int retval;
214 214
@@ -233,7 +233,7 @@ EXPORT_SYMBOL(mdiobus_read);
233 * because the bus read/write functions may wait for an interrupt 233 * because the bus read/write functions may wait for an interrupt
234 * to conclude the operation. 234 * to conclude the operation.
235 */ 235 */
236int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val) 236int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val)
237{ 237{
238 int err; 238 int err;
239 239
diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
index 6c636eb72089..729ab29ba28c 100644
--- a/drivers/net/phy/national.c
+++ b/drivers/net/phy/national.c
@@ -153,3 +153,10 @@ MODULE_LICENSE("GPL");
153 153
154module_init(ns_init); 154module_init(ns_init);
155module_exit(ns_exit); 155module_exit(ns_exit);
156
157static struct mdio_device_id ns_tbl[] = {
158 { DP83865_PHY_ID, 0xfffffff0 },
159 { }
160};
161
162MODULE_DEVICE_TABLE(mdio, ns_tbl);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index db1794546c56..1a99bb244106 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -149,6 +149,7 @@ EXPORT_SYMBOL(phy_scan_fixups);
149struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id) 149struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
150{ 150{
151 struct phy_device *dev; 151 struct phy_device *dev;
152
152 /* We allocate the device, and initialize the 153 /* We allocate the device, and initialize the
153 * default values */ 154 * default values */
154 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 155 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -179,6 +180,17 @@ struct phy_device* phy_device_create(struct mii_bus *bus, int addr, int phy_id)
179 mutex_init(&dev->lock); 180 mutex_init(&dev->lock);
180 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); 181 INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine);
181 182
183 /* Request the appropriate module unconditionally; don't
184 bother trying to do so only if it isn't already loaded,
185 because that gets complicated. A hotplug event would have
186 done an unconditional modprobe anyway.
187 We don't do normal hotplug because it won't work for MDIO
188 -- because it relies on the device staying around for long
189 enough for the driver to get loaded. With MDIO, the NIC
190 driver will get bored and give up as soon as it finds that
191 there's no driver _already_ loaded. */
192 request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id));
193
182 return dev; 194 return dev;
183} 195}
184EXPORT_SYMBOL(phy_device_create); 196EXPORT_SYMBOL(phy_device_create);
diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c
index f6e190f73c32..6736b23f1b28 100644
--- a/drivers/net/phy/qsemi.c
+++ b/drivers/net/phy/qsemi.c
@@ -137,3 +137,10 @@ static void __exit qs6612_exit(void)
137 137
138module_init(qs6612_init); 138module_init(qs6612_init);
139module_exit(qs6612_exit); 139module_exit(qs6612_exit);
140
141static struct mdio_device_id qs6612_tbl[] = {
142 { 0x00181440, 0xfffffff0 },
143 { }
144};
145
146MODULE_DEVICE_TABLE(mdio, qs6612_tbl);
diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c
index a052a6744a51..f567c0e1aaa1 100644
--- a/drivers/net/phy/realtek.c
+++ b/drivers/net/phy/realtek.c
@@ -78,3 +78,10 @@ static void __exit realtek_exit(void)
78 78
79module_init(realtek_init); 79module_init(realtek_init);
80module_exit(realtek_exit); 80module_exit(realtek_exit);
81
82static struct mdio_device_id realtek_tbl[] = {
83 { 0x001cc912, 0x001fffff },
84 { }
85};
86
87MODULE_DEVICE_TABLE(mdio, realtek_tbl);
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index ed2644a57500..78fa988256fc 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -253,3 +253,14 @@ MODULE_LICENSE("GPL");
253 253
254module_init(smsc_init); 254module_init(smsc_init);
255module_exit(smsc_exit); 255module_exit(smsc_exit);
256
257static struct mdio_device_id smsc_tbl[] = {
258 { 0x0007c0a0, 0xfffffff0 },
259 { 0x0007c0b0, 0xfffffff0 },
260 { 0x0007c0c0, 0xfffffff0 },
261 { 0x0007c0d0, 0xfffffff0 },
262 { 0x0007c0f0, 0xfffffff0 },
263 { }
264};
265
266MODULE_DEVICE_TABLE(mdio, smsc_tbl);
diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c
index 6bdb0d53aaf9..72290099e5e1 100644
--- a/drivers/net/phy/ste10Xp.c
+++ b/drivers/net/phy/ste10Xp.c
@@ -132,6 +132,14 @@ static void __exit ste10Xp_exit(void)
132module_init(ste10Xp_init); 132module_init(ste10Xp_init);
133module_exit(ste10Xp_exit); 133module_exit(ste10Xp_exit);
134 134
135static struct mdio_device_id ste10Xp_tbl[] = {
136 { STE101P_PHY_ID, 0xfffffff0 },
137 { STE100P_PHY_ID, 0xffffffff },
138 { }
139};
140
141MODULE_DEVICE_TABLE(mdio, ste10Xp_tbl);
142
135MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver"); 143MODULE_DESCRIPTION("STMicroelectronics STe10Xp PHY driver");
136MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 144MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
137MODULE_LICENSE("GPL"); 145MODULE_LICENSE("GPL");
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index dd3b2447e85a..45cce50a2799 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -191,3 +191,11 @@ static void __exit vsc82xx_exit(void)
191 191
192module_init(vsc82xx_init); 192module_init(vsc82xx_init);
193module_exit(vsc82xx_exit); 193module_exit(vsc82xx_exit);
194
195static struct mdio_device_id vitesse_tbl[] = {
196 { PHY_ID_VSC8244, 0x000fffc0 },
197 { PHY_ID_VSC8221, 0x000ffff0 },
198 { }
199};
200
201MODULE_DEVICE_TABLE(mdio, vitesse_tbl);
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 6e281bc825e5..35f195329fdd 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -2164,6 +2164,24 @@ int ppp_unit_number(struct ppp_channel *chan)
2164} 2164}
2165 2165
2166/* 2166/*
2167 * Return the PPP device interface name of a channel.
2168 */
2169char *ppp_dev_name(struct ppp_channel *chan)
2170{
2171 struct channel *pch = chan->ppp;
2172 char *name = NULL;
2173
2174 if (pch) {
2175 read_lock_bh(&pch->upl);
2176 if (pch->ppp && pch->ppp->dev)
2177 name = pch->ppp->dev->name;
2178 read_unlock_bh(&pch->upl);
2179 }
2180 return name;
2181}
2182
2183
2184/*
2167 * Disconnect a channel from the generic layer. 2185 * Disconnect a channel from the generic layer.
2168 * This must be called in process context. 2186 * This must be called in process context.
2169 */ 2187 */
@@ -2891,6 +2909,7 @@ EXPORT_SYMBOL(ppp_register_channel);
2891EXPORT_SYMBOL(ppp_unregister_channel); 2909EXPORT_SYMBOL(ppp_unregister_channel);
2892EXPORT_SYMBOL(ppp_channel_index); 2910EXPORT_SYMBOL(ppp_channel_index);
2893EXPORT_SYMBOL(ppp_unit_number); 2911EXPORT_SYMBOL(ppp_unit_number);
2912EXPORT_SYMBOL(ppp_dev_name);
2894EXPORT_SYMBOL(ppp_input); 2913EXPORT_SYMBOL(ppp_input);
2895EXPORT_SYMBOL(ppp_input_error); 2914EXPORT_SYMBOL(ppp_input_error);
2896EXPORT_SYMBOL(ppp_output_wakeup); 2915EXPORT_SYMBOL(ppp_output_wakeup);
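
A minimal sketch of how a channel driver might use the newly exported
ppp_dev_name(); the logging helper is hypothetical and only illustrates that
the function returns NULL until the channel is attached to a PPP unit:

	#include <linux/kernel.h>
	#include <linux/ppp_channel.h>

	/* Hypothetical helper: log the PPP interface a channel is bound to. */
	static void example_log_ppp_unit(struct ppp_channel *chan)
	{
		char *name = ppp_dev_name(chan);	/* NULL if not attached */

		if (name)
			printk(KERN_DEBUG "channel bound to %s\n", name);
	}
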
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
deleted file mode 100644
index 449a9825200d..000000000000
--- a/drivers/net/pppol2tp.c
+++ /dev/null
@@ -1,2680 +0,0 @@
1/*****************************************************************************
2 * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
3 *
4 * PPPoX --- Generic PPP encapsulation socket family
5 * PPPoL2TP --- PPP over L2TP (RFC 2661)
6 *
7 * Version: 1.0.0
8 *
9 * Authors: Martijn van Oosterhout <kleptog@svana.org>
10 * James Chapman (jchapman@katalix.com)
11 * Contributors:
12 * Michal Ostrowski <mostrows@speakeasy.net>
13 * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14 * David S. Miller (davem@redhat.com)
15 *
16 * License:
17 * This program is free software; you can redistribute it and/or
18 * modify it under the terms of the GNU General Public License
19 * as published by the Free Software Foundation; either version
20 * 2 of the License, or (at your option) any later version.
21 *
22 */
23
24/* This driver handles only L2TP data frames; control frames are handled by a
25 * userspace application.
26 *
27 * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
28 * attaches it to a bound UDP socket with local tunnel_id / session_id and
29 * peer tunnel_id / session_id set. Data can then be sent or received using
30 * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
31 * can be read or modified using ioctl() or [gs]etsockopt() calls.
32 *
33 * When a PPPoL2TP socket is connected with local and peer session_id values
34 * zero, the socket is treated as a special tunnel management socket.
35 *
36 * Here's example userspace code to create a socket for sending/receiving data
37 * over an L2TP session:-
38 *
39 * struct sockaddr_pppol2tp sax;
40 * int fd;
41 * int session_fd;
42 *
43 * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
44 *
45 * sax.sa_family = AF_PPPOX;
46 * sax.sa_protocol = PX_PROTO_OL2TP;
47 * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
48 * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
49 * sax.pppol2tp.addr.sin_port = addr->sin_port;
50 * sax.pppol2tp.addr.sin_family = AF_INET;
51 * sax.pppol2tp.s_tunnel = tunnel_id;
52 * sax.pppol2tp.s_session = session_id;
53 * sax.pppol2tp.d_tunnel = peer_tunnel_id;
54 * sax.pppol2tp.d_session = peer_session_id;
55 *
56 * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
57 *
58 * A pppd plugin that allows PPP traffic to be carried over L2TP using
59 * this driver is available from the OpenL2TP project at
60 * http://openl2tp.sourceforge.net.
61 */
62
63#include <linux/module.h>
64#include <linux/string.h>
65#include <linux/list.h>
66#include <asm/uaccess.h>
67
68#include <linux/kernel.h>
69#include <linux/spinlock.h>
70#include <linux/kthread.h>
71#include <linux/sched.h>
72#include <linux/slab.h>
73#include <linux/errno.h>
74#include <linux/jiffies.h>
75
76#include <linux/netdevice.h>
77#include <linux/net.h>
78#include <linux/inetdevice.h>
79#include <linux/skbuff.h>
80#include <linux/init.h>
81#include <linux/ip.h>
82#include <linux/udp.h>
83#include <linux/if_pppox.h>
84#include <linux/if_pppol2tp.h>
85#include <net/sock.h>
86#include <linux/ppp_channel.h>
87#include <linux/ppp_defs.h>
88#include <linux/if_ppp.h>
89#include <linux/file.h>
90#include <linux/hash.h>
91#include <linux/sort.h>
92#include <linux/proc_fs.h>
93#include <linux/nsproxy.h>
94#include <net/net_namespace.h>
95#include <net/netns/generic.h>
96#include <net/dst.h>
97#include <net/ip.h>
98#include <net/udp.h>
99#include <net/xfrm.h>
100
101#include <asm/byteorder.h>
102#include <asm/atomic.h>
103
104
105#define PPPOL2TP_DRV_VERSION "V1.0"
106
107/* L2TP header constants */
108#define L2TP_HDRFLAG_T 0x8000
109#define L2TP_HDRFLAG_L 0x4000
110#define L2TP_HDRFLAG_S 0x0800
111#define L2TP_HDRFLAG_O 0x0200
112#define L2TP_HDRFLAG_P 0x0100
113
114#define L2TP_HDR_VER_MASK 0x000F
115#define L2TP_HDR_VER 0x0002
116
117/* Space for UDP, L2TP and PPP headers */
118#define PPPOL2TP_HEADER_OVERHEAD 40
119
120/* Just some random numbers */
121#define L2TP_TUNNEL_MAGIC 0x42114DDA
122#define L2TP_SESSION_MAGIC 0x0C04EB7D
123
124#define PPPOL2TP_HASH_BITS 4
125#define PPPOL2TP_HASH_SIZE (1 << PPPOL2TP_HASH_BITS)
126
127/* Default trace flags */
128#define PPPOL2TP_DEFAULT_DEBUG_FLAGS 0
129
130#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
131 do { \
132 if ((_mask) & (_type)) \
133 printk(_lvl "PPPOL2TP: " _fmt, ##args); \
134 } while(0)
135
136/* Number of bytes to build transmit L2TP headers.
137 * Unfortunately the size is different depending on whether sequence numbers
138 * are enabled.
139 */
140#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
141#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
142
143struct pppol2tp_tunnel;
144
145/* Describes a session. It is the sk_user_data field in the PPPoL2TP
146 * socket. Contains the information needed to demultiplex incoming
147 * packets and to transmit outgoing ones.
148 */
149struct pppol2tp_session
150{
151 int magic; /* should be
152 * L2TP_SESSION_MAGIC */
153 int owner; /* pid that opened the socket */
154
155 struct sock *sock; /* Pointer to the session
156 * PPPoX socket */
157 struct sock *tunnel_sock; /* Pointer to the tunnel UDP
158 * socket */
159
160 struct pppol2tp_addr tunnel_addr; /* Description of tunnel */
161
162 struct pppol2tp_tunnel *tunnel; /* back pointer to tunnel
163 * context */
164
165 char name[20]; /* "sess xxxxx/yyyyy", where
166 * x=tunnel_id, y=session_id */
167 int mtu;
168 int mru;
169 int flags; /* accessed by PPPIOCGFLAGS.
170 * Unused. */
171 unsigned recv_seq:1; /* expect receive packets with
172 * sequence numbers? */
173 unsigned send_seq:1; /* send packets with sequence
174 * numbers? */
175 unsigned lns_mode:1; /* behave as LNS? LAC enables
176 * sequence numbers under
177 * control of LNS. */
178 int debug; /* bitmask of debug message
179 * categories */
180 int reorder_timeout; /* configured reorder timeout
181 * (in jiffies) */
182 u16 nr; /* session NR state (receive) */
183 u16 ns; /* session NR state (send) */
184 struct sk_buff_head reorder_q; /* receive reorder queue */
185 struct pppol2tp_ioc_stats stats;
186 struct hlist_node hlist; /* Hash list node */
187};
188
189/* The sk_user_data field of the tunnel's UDP socket. It contains info to track
190 * all the associated sessions so incoming packets can be sorted out.
191 */
192struct pppol2tp_tunnel
193{
194 int magic; /* Should be L2TP_TUNNEL_MAGIC */
195 rwlock_t hlist_lock; /* protect session_hlist */
196 struct hlist_head session_hlist[PPPOL2TP_HASH_SIZE];
197 /* hashed list of sessions,
198 * hashed by id */
199 int debug; /* bitmask of debug message
200 * categories */
201 char name[12]; /* "tunl xxxxx" */
202 struct pppol2tp_ioc_stats stats;
203
204 void (*old_sk_destruct)(struct sock *);
205
206 struct sock *sock; /* Parent socket */
207 struct list_head list; /* Keep a list of all open
208 * prepared sockets */
209 struct net *pppol2tp_net; /* the net we belong to */
210
211 atomic_t ref_count;
212};
213
214/* Private data stored for received packets in the skb.
215 */
216struct pppol2tp_skb_cb {
217 u16 ns;
218 u16 nr;
219 u16 has_seq;
220 u16 length;
221 unsigned long expires;
222};
223
224#define PPPOL2TP_SKB_CB(skb) ((struct pppol2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
225
226static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
227static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel);
228
229static atomic_t pppol2tp_tunnel_count;
230static atomic_t pppol2tp_session_count;
231static struct ppp_channel_ops pppol2tp_chan_ops = { pppol2tp_xmit, NULL };
232static const struct proto_ops pppol2tp_ops;
233
234/* per-net private data for this module */
235static int pppol2tp_net_id __read_mostly;
236struct pppol2tp_net {
237 struct list_head pppol2tp_tunnel_list;
238 rwlock_t pppol2tp_tunnel_list_lock;
239};
240
241static inline struct pppol2tp_net *pppol2tp_pernet(struct net *net)
242{
243 BUG_ON(!net);
244
245 return net_generic(net, pppol2tp_net_id);
246}
247
248/* Helpers to obtain tunnel/session contexts from sockets.
249 */
250static inline struct pppol2tp_session *pppol2tp_sock_to_session(struct sock *sk)
251{
252 struct pppol2tp_session *session;
253
254 if (sk == NULL)
255 return NULL;
256
257 sock_hold(sk);
258 session = (struct pppol2tp_session *)(sk->sk_user_data);
259 if (session == NULL) {
260 sock_put(sk);
261 goto out;
262 }
263
264 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
265out:
266 return session;
267}
268
269static inline struct pppol2tp_tunnel *pppol2tp_sock_to_tunnel(struct sock *sk)
270{
271 struct pppol2tp_tunnel *tunnel;
272
273 if (sk == NULL)
274 return NULL;
275
276 sock_hold(sk);
277 tunnel = (struct pppol2tp_tunnel *)(sk->sk_user_data);
278 if (tunnel == NULL) {
279 sock_put(sk);
280 goto out;
281 }
282
283 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
284out:
285 return tunnel;
286}
287
288/* Tunnel reference counts. Incremented per session that is added to
289 * the tunnel.
290 */
291static inline void pppol2tp_tunnel_inc_refcount(struct pppol2tp_tunnel *tunnel)
292{
293 atomic_inc(&tunnel->ref_count);
294}
295
296static inline void pppol2tp_tunnel_dec_refcount(struct pppol2tp_tunnel *tunnel)
297{
298 if (atomic_dec_and_test(&tunnel->ref_count))
299 pppol2tp_tunnel_free(tunnel);
300}
301
302/* Session hash list.
303 * The session_id SHOULD be random according to RFC2661, but several
304 * L2TP implementations (Cisco and Microsoft) use incrementing
305 * session_ids. So we do a real hash on the session_id, rather than a
306 * simple bitmask.
307 */
308static inline struct hlist_head *
309pppol2tp_session_id_hash(struct pppol2tp_tunnel *tunnel, u16 session_id)
310{
311 unsigned long hash_val = (unsigned long) session_id;
312 return &tunnel->session_hlist[hash_long(hash_val, PPPOL2TP_HASH_BITS)];
313}
314
315/* Lookup a session by id
316 */
317static struct pppol2tp_session *
318pppol2tp_session_find(struct pppol2tp_tunnel *tunnel, u16 session_id)
319{
320 struct hlist_head *session_list =
321 pppol2tp_session_id_hash(tunnel, session_id);
322 struct pppol2tp_session *session;
323 struct hlist_node *walk;
324
325 read_lock_bh(&tunnel->hlist_lock);
326 hlist_for_each_entry(session, walk, session_list, hlist) {
327 if (session->tunnel_addr.s_session == session_id) {
328 read_unlock_bh(&tunnel->hlist_lock);
329 return session;
330 }
331 }
332 read_unlock_bh(&tunnel->hlist_lock);
333
334 return NULL;
335}
336
337/* Lookup a tunnel by id
338 */
339static struct pppol2tp_tunnel *pppol2tp_tunnel_find(struct net *net, u16 tunnel_id)
340{
341 struct pppol2tp_tunnel *tunnel;
342 struct pppol2tp_net *pn = pppol2tp_pernet(net);
343
344 read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
345 list_for_each_entry(tunnel, &pn->pppol2tp_tunnel_list, list) {
346 if (tunnel->stats.tunnel_id == tunnel_id) {
347 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
348 return tunnel;
349 }
350 }
351 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
352
353 return NULL;
354}
355
356/*****************************************************************************
357 * Receive data handling
358 *****************************************************************************/
359
360/* Queue a skb in order. We come here only if the skb has an L2TP sequence
361 * number.
362 */
363static void pppol2tp_recv_queue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
364{
365 struct sk_buff *skbp;
366 struct sk_buff *tmp;
367 u16 ns = PPPOL2TP_SKB_CB(skb)->ns;
368
369 spin_lock_bh(&session->reorder_q.lock);
370 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
371 if (PPPOL2TP_SKB_CB(skbp)->ns > ns) {
372 __skb_queue_before(&session->reorder_q, skbp, skb);
373 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
374 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
375 session->name, ns, PPPOL2TP_SKB_CB(skbp)->ns,
376 skb_queue_len(&session->reorder_q));
377 session->stats.rx_oos_packets++;
378 goto out;
379 }
380 }
381
382 __skb_queue_tail(&session->reorder_q, skb);
383
384out:
385 spin_unlock_bh(&session->reorder_q.lock);
386}
387
388/* Dequeue a single skb.
389 */
390static void pppol2tp_recv_dequeue_skb(struct pppol2tp_session *session, struct sk_buff *skb)
391{
392 struct pppol2tp_tunnel *tunnel = session->tunnel;
393 int length = PPPOL2TP_SKB_CB(skb)->length;
394 struct sock *session_sock = NULL;
395
396 /* We're about to requeue the skb, so return resources
397 * to its current owner (a socket receive buffer).
398 */
399 skb_orphan(skb);
400
401 tunnel->stats.rx_packets++;
402 tunnel->stats.rx_bytes += length;
403 session->stats.rx_packets++;
404 session->stats.rx_bytes += length;
405
406 if (PPPOL2TP_SKB_CB(skb)->has_seq) {
407 /* Bump our Nr */
408 session->nr++;
409 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
410 "%s: updated nr to %hu\n", session->name, session->nr);
411 }
412
413 /* If the socket is bound, send it in to PPP's input queue. Otherwise
414 * queue it on the session socket.
415 */
416 session_sock = session->sock;
417 if (session_sock->sk_state & PPPOX_BOUND) {
418 struct pppox_sock *po;
419 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
420 "%s: recv %d byte data frame, passing to ppp\n",
421 session->name, length);
422
423 /* We need to forget all info related to the L2TP packet
424 * gathered in the skb as we are going to reuse the same
425 * skb for the inner packet.
426 * Namely we need to:
427 * - reset xfrm (IPSec) information as it applies to
428 * the outer L2TP packet and not to the inner one
429 * - release the dst to force a route lookup on the inner
430 * IP packet since skb->dst currently points to the dst
431 * of the UDP tunnel
432 * - reset netfilter information as it doesn't apply
433 * to the inner packet either
434 */
435 secpath_reset(skb);
436 skb_dst_drop(skb);
437 nf_reset(skb);
438
439 po = pppox_sk(session_sock);
440 ppp_input(&po->chan, skb);
441 } else {
442 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
443 "%s: socket not bound\n", session->name);
444
445 /* Not bound. Nothing we can do, so discard. */
446 session->stats.rx_errors++;
447 kfree_skb(skb);
448 }
449
450 sock_put(session->sock);
451}
452
453/* Dequeue skbs from the session's reorder_q, subject to packet order.
454 * Skbs that have been in the queue for too long are simply discarded.
455 */
456static void pppol2tp_recv_dequeue(struct pppol2tp_session *session)
457{
458 struct sk_buff *skb;
459 struct sk_buff *tmp;
460
461 /* If the pkt at the head of the queue has the nr that we
462 * expect to send up next, dequeue it and any other
463 * in-sequence packets behind it.
464 */
465 spin_lock_bh(&session->reorder_q.lock);
466 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
467 if (time_after(jiffies, PPPOL2TP_SKB_CB(skb)->expires)) {
468 session->stats.rx_seq_discards++;
469 session->stats.rx_errors++;
470 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
471 "%s: oos pkt %hu len %d discarded (too old), "
472 "waiting for %hu, reorder_q_len=%d\n",
473 session->name, PPPOL2TP_SKB_CB(skb)->ns,
474 PPPOL2TP_SKB_CB(skb)->length, session->nr,
475 skb_queue_len(&session->reorder_q));
476 __skb_unlink(skb, &session->reorder_q);
477 kfree_skb(skb);
478 sock_put(session->sock);
479 continue;
480 }
481
482 if (PPPOL2TP_SKB_CB(skb)->has_seq) {
483 if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
484 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
485 "%s: holding oos pkt %hu len %d, "
486 "waiting for %hu, reorder_q_len=%d\n",
487 session->name, PPPOL2TP_SKB_CB(skb)->ns,
488 PPPOL2TP_SKB_CB(skb)->length, session->nr,
489 skb_queue_len(&session->reorder_q));
490 goto out;
491 }
492 }
493 __skb_unlink(skb, &session->reorder_q);
494
495 /* Process the skb. We release the queue lock while we
496 * do so to let other contexts process the queue.
497 */
498 spin_unlock_bh(&session->reorder_q.lock);
499 pppol2tp_recv_dequeue_skb(session, skb);
500 spin_lock_bh(&session->reorder_q.lock);
501 }
502
503out:
504 spin_unlock_bh(&session->reorder_q.lock);
505}
506
507static inline int pppol2tp_verify_udp_checksum(struct sock *sk,
508 struct sk_buff *skb)
509{
510 struct udphdr *uh = udp_hdr(skb);
511 u16 ulen = ntohs(uh->len);
512 struct inet_sock *inet;
513 __wsum psum;
514
515 if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
516 return 0;
517
518 inet = inet_sk(sk);
519 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
520 IPPROTO_UDP, 0);
521
522 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
523 !csum_fold(csum_add(psum, skb->csum)))
524 return 0;
525
526 skb->csum = psum;
527
528 return __skb_checksum_complete(skb);
529}
530
531/* Internal receive frame. Do the real work of receiving an L2TP data frame
532 * here. The skb is not on a list when we get here.
533 * Returns 0 if the packet was a data packet and was successfully passed on.
534 * Returns 1 if the packet was not a good data packet and could not be
535 * forwarded. All such packets are passed up to userspace to deal with.
536 */
537static int pppol2tp_recv_core(struct sock *sock, struct sk_buff *skb)
538{
539 struct pppol2tp_session *session = NULL;
540 struct pppol2tp_tunnel *tunnel;
541 unsigned char *ptr, *optr;
542 u16 hdrflags;
543 u16 tunnel_id, session_id;
544 int length;
545 int offset;
546
547 tunnel = pppol2tp_sock_to_tunnel(sock);
548 if (tunnel == NULL)
549 goto no_tunnel;
550
551 if (tunnel->sock && pppol2tp_verify_udp_checksum(tunnel->sock, skb))
552 goto discard_bad_csum;
553
554 /* UDP always verifies the packet length. */
555 __skb_pull(skb, sizeof(struct udphdr));
556
557 /* Short packet? */
558 if (!pskb_may_pull(skb, 12)) {
559 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
560 "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
561 goto error;
562 }
563
564 /* Point to L2TP header */
565 optr = ptr = skb->data;
566
567 /* Get L2TP header flags */
568 hdrflags = ntohs(*(__be16*)ptr);
569
570 /* Trace packet contents, if enabled */
571 if (tunnel->debug & PPPOL2TP_MSG_DATA) {
572 length = min(16u, skb->len);
573 if (!pskb_may_pull(skb, length))
574 goto error;
575
576 printk(KERN_DEBUG "%s: recv: ", tunnel->name);
577
578 offset = 0;
579 do {
580 printk(" %02X", ptr[offset]);
581 } while (++offset < length);
582
583 printk("\n");
584 }
585
586 /* Get length of L2TP packet */
587 length = skb->len;
588
589 /* If the packet is a control packet, it is handled by userspace. */
590 if (hdrflags & L2TP_HDRFLAG_T) {
591 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
592 "%s: recv control packet, len=%d\n", tunnel->name, length);
593 goto error;
594 }
595
596 /* Skip flags */
597 ptr += 2;
598
599 /* If length is present, skip it */
600 if (hdrflags & L2TP_HDRFLAG_L)
601 ptr += 2;
602
603 /* Extract tunnel and session ID */
604 tunnel_id = ntohs(*(__be16 *) ptr);
605 ptr += 2;
606 session_id = ntohs(*(__be16 *) ptr);
607 ptr += 2;
608
609 /* Find the session context */
610 session = pppol2tp_session_find(tunnel, session_id);
611 if (!session) {
612 /* Not found? Pass to userspace to deal with */
613 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
614 "%s: no socket found (%hu/%hu). Passing up.\n",
615 tunnel->name, tunnel_id, session_id);
616 goto error;
617 }
618 sock_hold(session->sock);
619
620 /* The ref count on the socket was increased by the above call since
621 * we now hold a pointer to the session. Take care to do sock_put()
622 * when exiting this function from now on...
623 */
624
625 /* Handle the optional sequence numbers. If we are the LAC,
626 * enable/disable sequence numbers under the control of the LNS. If
627 * no sequence numbers present but we were expecting them, discard
628 * frame.
629 */
630 if (hdrflags & L2TP_HDRFLAG_S) {
631 u16 ns, nr;
632 ns = ntohs(*(__be16 *) ptr);
633 ptr += 2;
634 nr = ntohs(*(__be16 *) ptr);
635 ptr += 2;
636
637 /* Received a packet with sequence numbers. If we're the LNS,
638 * check if we sre sending sequence numbers and if not,
639 * configure it so.
640 */
641 if ((!session->lns_mode) && (!session->send_seq)) {
642 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
643 "%s: requested to enable seq numbers by LNS\n",
644 session->name);
645 session->send_seq = -1;
646 }
647
648 /* Store L2TP info in the skb */
649 PPPOL2TP_SKB_CB(skb)->ns = ns;
650 PPPOL2TP_SKB_CB(skb)->nr = nr;
651 PPPOL2TP_SKB_CB(skb)->has_seq = 1;
652
653 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
654 "%s: recv data ns=%hu, nr=%hu, session nr=%hu\n",
655 session->name, ns, nr, session->nr);
656 } else {
657 /* No sequence numbers.
658 * If user has configured mandatory sequence numbers, discard.
659 */
660 if (session->recv_seq) {
661 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
662 "%s: recv data has no seq numbers when required. "
663 "Discarding\n", session->name);
664 session->stats.rx_seq_discards++;
665 goto discard;
666 }
667
668 /* If we're the LAC and we're sending sequence numbers, the
669 * LNS has requested that we no longer send sequence numbers.
670 * If we're the LNS and we're sending sequence numbers, the
671 * LAC is broken. Discard the frame.
672 */
673 if ((!session->lns_mode) && (session->send_seq)) {
674 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_INFO,
675 "%s: requested to disable seq numbers by LNS\n",
676 session->name);
677 session->send_seq = 0;
678 } else if (session->send_seq) {
679 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_WARNING,
680 "%s: recv data has no seq numbers when required. "
681 "Discarding\n", session->name);
682 session->stats.rx_seq_discards++;
683 goto discard;
684 }
685
686 /* Store L2TP info in the skb */
687 PPPOL2TP_SKB_CB(skb)->has_seq = 0;
688 }
689
690 /* If offset bit set, skip it. */
691 if (hdrflags & L2TP_HDRFLAG_O) {
692 offset = ntohs(*(__be16 *)ptr);
693 ptr += 2 + offset;
694 }
695
696 offset = ptr - optr;
697 if (!pskb_may_pull(skb, offset))
698 goto discard;
699
700 __skb_pull(skb, offset);
701
702 /* Skip PPP header, if present. In testing, Microsoft L2TP clients
703 * don't send the PPP header (PPP header compression enabled), but
704 * other clients can include the header. So we cope with both cases
705 * here. The PPP header is always FF03 when using L2TP.
706 *
707 * Note that skb->data[] isn't dereferenced from a u16 ptr here since
708 * the field may be unaligned.
709 */
710 if (!pskb_may_pull(skb, 2))
711 goto discard;
712
713 if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
714 skb_pull(skb, 2);
715
716 /* Prepare skb for adding to the session's reorder_q. Hold
717 * packets for max reorder_timeout or 1 second if not
718 * reordering.
719 */
720 PPPOL2TP_SKB_CB(skb)->length = length;
721 PPPOL2TP_SKB_CB(skb)->expires = jiffies +
722 (session->reorder_timeout ? session->reorder_timeout : HZ);
723
724 /* Add packet to the session's receive queue. Reordering is done here, if
725 * enabled. Saved L2TP protocol info is stored in skb->cb[].
726 */
727 if (PPPOL2TP_SKB_CB(skb)->has_seq) {
728 if (session->reorder_timeout != 0) {
729 /* Packet reordering enabled. Add skb to session's
730 * reorder queue, in order of ns.
731 */
732 pppol2tp_recv_queue_skb(session, skb);
733 } else {
734 /* Packet reordering disabled. Discard out-of-sequence
735 * packets
736 */
737 if (PPPOL2TP_SKB_CB(skb)->ns != session->nr) {
738 session->stats.rx_seq_discards++;
739 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
740 "%s: oos pkt %hu len %d discarded, "
741 "waiting for %hu, reorder_q_len=%d\n",
742 session->name, PPPOL2TP_SKB_CB(skb)->ns,
743 PPPOL2TP_SKB_CB(skb)->length, session->nr,
744 skb_queue_len(&session->reorder_q));
745 goto discard;
746 }
747 skb_queue_tail(&session->reorder_q, skb);
748 }
749 } else {
750 /* No sequence numbers. Add the skb to the tail of the
751 * reorder queue. This ensures that it will be
752 * delivered after all previous sequenced skbs.
753 */
754 skb_queue_tail(&session->reorder_q, skb);
755 }
756
757 /* Try to dequeue as many skbs from reorder_q as we can. */
758 pppol2tp_recv_dequeue(session);
759 sock_put(sock);
760
761 return 0;
762
763discard:
764 session->stats.rx_errors++;
765 kfree_skb(skb);
766 sock_put(session->sock);
767 sock_put(sock);
768
769 return 0;
770
771discard_bad_csum:
772 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
773 UDP_INC_STATS_USER(&init_net, UDP_MIB_INERRORS, 0);
774 tunnel->stats.rx_errors++;
775 kfree_skb(skb);
776 sock_put(sock);
777
778 return 0;
779
780error:
781 /* Put UDP header back */
782 __skb_push(skb, sizeof(struct udphdr));
783 sock_put(sock);
784
785no_tunnel:
786 return 1;
787}
788
789/* UDP encapsulation receive handler. See net/ipv4/udp.c.
790 * Return codes:
791 * 0 : success.
792 * <0: error
793 * >0: skb should be passed up to userspace as UDP.
794 */
795static int pppol2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
796{
797 struct pppol2tp_tunnel *tunnel;
798
799 tunnel = pppol2tp_sock_to_tunnel(sk);
800 if (tunnel == NULL)
801 goto pass_up;
802
803 PRINTK(tunnel->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
804 "%s: received %d bytes\n", tunnel->name, skb->len);
805
806 if (pppol2tp_recv_core(sk, skb))
807 goto pass_up_put;
808
809 sock_put(sk);
810 return 0;
811
812pass_up_put:
813 sock_put(sk);
814pass_up:
815 return 1;
816}
817
818/* Receive message. This is the recvmsg for the PPPoL2TP socket.
819 */
820static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
821 struct msghdr *msg, size_t len,
822 int flags)
823{
824 int err;
825 struct sk_buff *skb;
826 struct sock *sk = sock->sk;
827
828 err = -EIO;
829 if (sk->sk_state & PPPOX_BOUND)
830 goto end;
831
832 msg->msg_namelen = 0;
833
834 err = 0;
835 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
836 flags & MSG_DONTWAIT, &err);
837 if (!skb)
838 goto end;
839
840 if (len > skb->len)
841 len = skb->len;
842 else if (len < skb->len)
843 msg->msg_flags |= MSG_TRUNC;
844
845 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
846 if (likely(err == 0))
847 err = len;
848
849 kfree_skb(skb);
850end:
851 return err;
852}
853
854/************************************************************************
855 * Transmit handling
856 ***********************************************************************/
857
858/* Tell how big L2TP headers are for a particular session. This
859 * depends on whether sequence numbers are being used.
860 */
861static inline int pppol2tp_l2tp_header_len(struct pppol2tp_session *session)
862{
863 if (session->send_seq)
864 return PPPOL2TP_L2TP_HDR_SIZE_SEQ;
865
866 return PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
867}
868
869/* Build an L2TP header for the session into the buffer provided.
870 */
871static void pppol2tp_build_l2tp_header(struct pppol2tp_session *session,
872 void *buf)
873{
874 __be16 *bufp = buf;
875 u16 flags = L2TP_HDR_VER;
876
877 if (session->send_seq)
878 flags |= L2TP_HDRFLAG_S;
879
880 /* Setup L2TP header.
881 * FIXME: Can this ever be unaligned? Is direct dereferencing of
882 * 16-bit header fields safe here for all architectures?
883 */
884 *bufp++ = htons(flags);
885 *bufp++ = htons(session->tunnel_addr.d_tunnel);
886 *bufp++ = htons(session->tunnel_addr.d_session);
887 if (session->send_seq) {
888 *bufp++ = htons(session->ns);
889 *bufp++ = 0;
890 session->ns++;
891 PRINTK(session->debug, PPPOL2TP_MSG_SEQ, KERN_DEBUG,
892 "%s: updated ns to %hu\n", session->name, session->ns);
893 }
894}
895
896/* This is the sendmsg for the PPPoL2TP pppol2tp_session socket. We come here
897 * when a user application does a sendmsg() on the session socket. L2TP and
898 * PPP headers must be inserted into the user's data.
899 */
900static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
901 size_t total_len)
902{
903 static const unsigned char ppph[2] = { 0xff, 0x03 };
904 struct sock *sk = sock->sk;
905 struct inet_sock *inet;
906 __wsum csum;
907 struct sk_buff *skb;
908 int error;
909 int hdr_len;
910 struct pppol2tp_session *session;
911 struct pppol2tp_tunnel *tunnel;
912 struct udphdr *uh;
913 unsigned int len;
914 struct sock *sk_tun;
915 u16 udp_len;
916
917 error = -ENOTCONN;
918 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
919 goto error;
920
921 /* Get session and tunnel contexts */
922 error = -EBADF;
923 session = pppol2tp_sock_to_session(sk);
924 if (session == NULL)
925 goto error;
926
927 sk_tun = session->tunnel_sock;
928 tunnel = pppol2tp_sock_to_tunnel(sk_tun);
929 if (tunnel == NULL)
930 goto error_put_sess;
931
932 /* What header length is configured for this session? */
933 hdr_len = pppol2tp_l2tp_header_len(session);
934
935 /* Allocate a socket buffer */
936 error = -ENOMEM;
937 skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
938 sizeof(struct udphdr) + hdr_len +
939 sizeof(ppph) + total_len,
940 0, GFP_KERNEL);
941 if (!skb)
942 goto error_put_sess_tun;
943
944 /* Reserve space for headers. */
945 skb_reserve(skb, NET_SKB_PAD);
946 skb_reset_network_header(skb);
947 skb_reserve(skb, sizeof(struct iphdr));
948 skb_reset_transport_header(skb);
949
950 /* Build UDP header */
951 inet = inet_sk(sk_tun);
952 udp_len = hdr_len + sizeof(ppph) + total_len;
953 uh = (struct udphdr *) skb->data;
954 uh->source = inet->inet_sport;
955 uh->dest = inet->inet_dport;
956 uh->len = htons(udp_len);
957 uh->check = 0;
958 skb_put(skb, sizeof(struct udphdr));
959
960 /* Build L2TP header */
961 pppol2tp_build_l2tp_header(session, skb->data);
962 skb_put(skb, hdr_len);
963
964 /* Add PPP header */
965 skb->data[0] = ppph[0];
966 skb->data[1] = ppph[1];
967 skb_put(skb, 2);
968
969 /* Copy user data into skb */
970 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
971 if (error < 0) {
972 kfree_skb(skb);
973 goto error_put_sess_tun;
974 }
975 skb_put(skb, total_len);
976
977 /* Calculate UDP checksum if configured to do so */
978 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
979 skb->ip_summed = CHECKSUM_NONE;
980 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
981 skb->ip_summed = CHECKSUM_COMPLETE;
982 csum = skb_checksum(skb, 0, udp_len, 0);
983 uh->check = csum_tcpudp_magic(inet->inet_saddr,
984 inet->inet_daddr,
985 udp_len, IPPROTO_UDP, csum);
986 if (uh->check == 0)
987 uh->check = CSUM_MANGLED_0;
988 } else {
989 skb->ip_summed = CHECKSUM_PARTIAL;
990 skb->csum_start = skb_transport_header(skb) - skb->head;
991 skb->csum_offset = offsetof(struct udphdr, check);
992 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
993 inet->inet_daddr,
994 udp_len, IPPROTO_UDP, 0);
995 }
996
997 /* Debug */
998 if (session->send_seq)
999 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
1000 "%s: send %Zd bytes, ns=%hu\n", session->name,
1001 total_len, session->ns - 1);
1002 else
1003 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
1004 "%s: send %Zd bytes\n", session->name, total_len);
1005
1006 if (session->debug & PPPOL2TP_MSG_DATA) {
1007 int i;
1008 unsigned char *datap = skb->data;
1009
1010 printk(KERN_DEBUG "%s: xmit:", session->name);
1011 for (i = 0; i < total_len; i++) {
1012 printk(" %02X", *datap++);
1013 if (i == 15) {
1014 printk(" ...");
1015 break;
1016 }
1017 }
1018 printk("\n");
1019 }
1020
1021 /* Queue the packet to IP for output */
1022 len = skb->len;
1023 error = ip_queue_xmit(skb, 1);
1024
1025 /* Update stats */
1026 if (error >= 0) {
1027 tunnel->stats.tx_packets++;
1028 tunnel->stats.tx_bytes += len;
1029 session->stats.tx_packets++;
1030 session->stats.tx_bytes += len;
1031 } else {
1032 tunnel->stats.tx_errors++;
1033 session->stats.tx_errors++;
1034 }
1035
1036 return error;
1037
1038error_put_sess_tun:
1039 sock_put(session->tunnel_sock);
1040error_put_sess:
1041 sock_put(sk);
1042error:
1043 return error;
1044}
1045
1046/* Automatically called when the skb is freed.
1047 */
1048static void pppol2tp_sock_wfree(struct sk_buff *skb)
1049{
1050 sock_put(skb->sk);
1051}
1052
1053/* For data skbs that we transmit, we associate with the tunnel socket
1054 * but don't do accounting.
1055 */
1056static inline void pppol2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
1057{
1058 sock_hold(sk);
1059 skb->sk = sk;
1060 skb->destructor = pppol2tp_sock_wfree;
1061}
1062
1063/* Transmit function called by generic PPP driver. Sends PPP frame
1064 * over PPPoL2TP socket.
1065 *
1066 * This is almost the same as pppol2tp_sendmsg(), but rather than
1067 * being called with a msghdr from userspace, it is called with a skb
1068 * from the kernel.
1069 *
1070 * The supplied skb from ppp doesn't have enough headroom for the
1071 * insertion of L2TP, UDP and IP headers so we need to allocate more
1072 * headroom in the skb. This will create a cloned skb. But we must be
1073 * careful in the error case because the caller will expect to free
1074 * the skb it supplied, not our cloned skb. So we take care to always
1075 * leave the original skb unfreed if we return an error.
1076 */
1077static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1078{
1079 static const u8 ppph[2] = { 0xff, 0x03 };
1080 struct sock *sk = (struct sock *) chan->private;
1081 struct sock *sk_tun;
1082 int hdr_len;
1083 u16 udp_len;
1084 struct pppol2tp_session *session;
1085 struct pppol2tp_tunnel *tunnel;
1086 int rc;
1087 int headroom;
1088 int data_len = skb->len;
1089 struct inet_sock *inet;
1090 __wsum csum;
1091 struct udphdr *uh;
1092 unsigned int len;
1093 int old_headroom;
1094 int new_headroom;
1095
1096 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
1097 goto abort;
1098
1099 /* Get session and tunnel contexts from the socket */
1100 session = pppol2tp_sock_to_session(sk);
1101 if (session == NULL)
1102 goto abort;
1103
1104 sk_tun = session->tunnel_sock;
1105 if (sk_tun == NULL)
1106 goto abort_put_sess;
1107 tunnel = pppol2tp_sock_to_tunnel(sk_tun);
1108 if (tunnel == NULL)
1109 goto abort_put_sess;
1110
1111 /* What header length is configured for this session? */
1112 hdr_len = pppol2tp_l2tp_header_len(session);
1113
1114 /* Check that there's enough headroom in the skb to insert IP,
1115 * UDP and L2TP and PPP headers. If not enough, expand it to
1116 * make room. Adjust truesize.
1117 */
1118 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1119 sizeof(struct udphdr) + hdr_len + sizeof(ppph);
1120 old_headroom = skb_headroom(skb);
1121 if (skb_cow_head(skb, headroom))
1122 goto abort_put_sess_tun;
1123
1124 new_headroom = skb_headroom(skb);
1125 skb_orphan(skb);
1126 skb->truesize += new_headroom - old_headroom;
1127
1128 /* Setup PPP header */
1129 __skb_push(skb, sizeof(ppph));
1130 skb->data[0] = ppph[0];
1131 skb->data[1] = ppph[1];
1132
1133 /* Setup L2TP header */
1134 pppol2tp_build_l2tp_header(session, __skb_push(skb, hdr_len));
1135
1136 udp_len = sizeof(struct udphdr) + hdr_len + sizeof(ppph) + data_len;
1137
1138 /* Setup UDP header */
1139 inet = inet_sk(sk_tun);
1140 __skb_push(skb, sizeof(*uh));
1141 skb_reset_transport_header(skb);
1142 uh = udp_hdr(skb);
1143 uh->source = inet->inet_sport;
1144 uh->dest = inet->inet_dport;
1145 uh->len = htons(udp_len);
1146 uh->check = 0;
1147
1148 /* Debug */
1149 if (session->send_seq)
1150 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
1151 "%s: send %d bytes, ns=%hu\n", session->name,
1152 data_len, session->ns - 1);
1153 else
1154 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
1155 "%s: send %d bytes\n", session->name, data_len);
1156
1157 if (session->debug & PPPOL2TP_MSG_DATA) {
1158 int i;
1159 unsigned char *datap = skb->data;
1160
1161 printk(KERN_DEBUG "%s: xmit:", session->name);
1162 for (i = 0; i < data_len; i++) {
1163 printk(" %02X", *datap++);
1164 if (i == 31) {
1165 printk(" ...");
1166 break;
1167 }
1168 }
1169 printk("\n");
1170 }
1171
1172 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1173 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1174 IPSKB_REROUTED);
1175 nf_reset(skb);
1176
1177 /* Get routing info from the tunnel socket */
1178 skb_dst_drop(skb);
1179 skb_dst_set(skb, dst_clone(__sk_dst_get(sk_tun)));
1180 pppol2tp_skb_set_owner_w(skb, sk_tun);
1181
1182 /* Calculate UDP checksum if configured to do so */
1183 if (sk_tun->sk_no_check == UDP_CSUM_NOXMIT)
1184 skb->ip_summed = CHECKSUM_NONE;
1185 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1186 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1187 skb->ip_summed = CHECKSUM_COMPLETE;
1188 csum = skb_checksum(skb, 0, udp_len, 0);
1189 uh->check = csum_tcpudp_magic(inet->inet_saddr,
1190 inet->inet_daddr,
1191 udp_len, IPPROTO_UDP, csum);
1192 if (uh->check == 0)
1193 uh->check = CSUM_MANGLED_0;
1194 } else {
1195 skb->ip_summed = CHECKSUM_PARTIAL;
1196 skb->csum_start = skb_transport_header(skb) - skb->head;
1197 skb->csum_offset = offsetof(struct udphdr, check);
1198 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
1199 inet->inet_daddr,
1200 udp_len, IPPROTO_UDP, 0);
1201 }
1202
1203 /* Queue the packet to IP for output */
1204 len = skb->len;
1205 rc = ip_queue_xmit(skb, 1);
1206
1207 /* Update stats */
1208 if (rc >= 0) {
1209 tunnel->stats.tx_packets++;
1210 tunnel->stats.tx_bytes += len;
1211 session->stats.tx_packets++;
1212 session->stats.tx_bytes += len;
1213 } else {
1214 tunnel->stats.tx_errors++;
1215 session->stats.tx_errors++;
1216 }
1217
1218 sock_put(sk_tun);
1219 sock_put(sk);
1220 return 1;
1221
1222abort_put_sess_tun:
1223 sock_put(sk_tun);
1224abort_put_sess:
1225 sock_put(sk);
1226abort:
1227 /* Free the original skb */
1228 kfree_skb(skb);
1229 return 1;
1230}
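
/* Editor's note: a minimal sketch (not driver code) of the headroom
 * pattern pppol2tp_xmit() uses above. skb_cow_head() reallocates the
 * skb header when the skb is cloned or short on headroom and leaves
 * the caller's skb untouched on failure; truesize is then adjusted so
 * socket memory accounting sees the larger buffer. The function name
 * is hypothetical.
 */
static int example_make_headroom(struct sk_buff *skb, unsigned int needed)
{
	int old_headroom = skb_headroom(skb);

	if (skb_cow_head(skb, needed))
		return -ENOMEM;		/* caller still owns and frees skb */

	skb_orphan(skb);
	skb->truesize += skb_headroom(skb) - old_headroom;
	return 0;
}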
1231
1232/*****************************************************************************
1233 * Session (and tunnel control) socket create/destroy.
1234 *****************************************************************************/
1235
1236/* When the tunnel UDP socket is closed, all the attached sockets need to go
1237 * too.
1238 */
1239static void pppol2tp_tunnel_closeall(struct pppol2tp_tunnel *tunnel)
1240{
1241 int hash;
1242 struct hlist_node *walk;
1243 struct hlist_node *tmp;
1244 struct pppol2tp_session *session;
1245 struct sock *sk;
1246
1247 BUG_ON(tunnel == NULL);
1248
1249 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1250 "%s: closing all sessions...\n", tunnel->name);
1251
1252 write_lock_bh(&tunnel->hlist_lock);
1253 for (hash = 0; hash < PPPOL2TP_HASH_SIZE; hash++) {
1254again:
1255 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1256 struct sk_buff *skb;
1257
1258 session = hlist_entry(walk, struct pppol2tp_session, hlist);
1259
1260 sk = session->sock;
1261
1262 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1263 "%s: closing session\n", session->name);
1264
1265 hlist_del_init(&session->hlist);
1266
1267 /* The sock lock must be held while unbinding,
1268 * so release the hlist lock we're holding
1269 * before taking the sock lock. Hold a
1270 * reference to the sock so it doesn't
1271 * disappear while we're juggling the locks.
1272 */
1273 sock_hold(sk);
1274 write_unlock_bh(&tunnel->hlist_lock);
1275 lock_sock(sk);
1276
1277 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
1278 pppox_unbind_sock(sk);
1279 sk->sk_state = PPPOX_DEAD;
1280 sk->sk_state_change(sk);
1281 }
1282
1283 /* Purge any queued data */
1284 skb_queue_purge(&sk->sk_receive_queue);
1285 skb_queue_purge(&sk->sk_write_queue);
1286 while ((skb = skb_dequeue(&session->reorder_q))) {
1287 kfree_skb(skb);
1288 sock_put(sk);
1289 }
1290
1291 release_sock(sk);
1292 sock_put(sk);
1293
1294 /* Now restart from the beginning of this hash
1295 * chain. We always remove a session from the
1296 * list so we are guaranteed to make forward
1297 * progress.
1298 */
1299 write_lock_bh(&tunnel->hlist_lock);
1300 goto again;
1301 }
1302 }
1303 write_unlock_bh(&tunnel->hlist_lock);
1304}
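
/* Editor's note: the lock juggling in pppol2tp_tunnel_closeall(),
 * reduced to a hedged sketch. The hlist lock cannot be held across
 * lock_sock(), so each entry is unlinked first, the list lock is
 * dropped around the per-sock teardown, and the walk restarts from
 * the chain head; unlinking one entry per pass guarantees forward
 * progress. All names below are illustrative.
 */
static void example_close_chain(rwlock_t *list_lock, struct hlist_head *head)
{
	struct hlist_node *walk, *tmp;

	write_lock_bh(list_lock);
again:
	hlist_for_each_safe(walk, tmp, head) {
		hlist_del_init(walk);
		write_unlock_bh(list_lock);	/* drop before lock_sock() */
		/* ... lock_sock(), unbind, purge queues, release_sock() ... */
		write_lock_bh(list_lock);
		goto again;			/* chain may have changed */
	}
	write_unlock_bh(list_lock);
}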
1305
1306/* Really kill the tunnel.
1307 * Come here only when all sessions have been cleared from the tunnel.
1308 */
1309static void pppol2tp_tunnel_free(struct pppol2tp_tunnel *tunnel)
1310{
1311 struct pppol2tp_net *pn = pppol2tp_pernet(tunnel->pppol2tp_net);
1312
1313 /* Remove from socket list */
1314 write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
1315 list_del_init(&tunnel->list);
1316 write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
1317
1318 atomic_dec(&pppol2tp_tunnel_count);
1319 kfree(tunnel);
1320}
1321
1322/* Tunnel UDP socket destruct hook.
1323 * The tunnel context is deleted only when all session sockets have been
1324 * closed.
1325 */
1326static void pppol2tp_tunnel_destruct(struct sock *sk)
1327{
1328 struct pppol2tp_tunnel *tunnel;
1329
1330 tunnel = sk->sk_user_data;
1331 if (tunnel == NULL)
1332 goto end;
1333
1334 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1335 "%s: closing...\n", tunnel->name);
1336
1337 /* Close all sessions */
1338 pppol2tp_tunnel_closeall(tunnel);
1339
1340 /* No longer an encapsulation socket. See net/ipv4/udp.c */
1341 (udp_sk(sk))->encap_type = 0;
1342 (udp_sk(sk))->encap_rcv = NULL;
1343
1344 /* Remove hooks into tunnel socket */
1345 tunnel->sock = NULL;
1346 sk->sk_destruct = tunnel->old_sk_destruct;
1347 sk->sk_user_data = NULL;
1348
1349 /* Call the original (UDP) socket destructor */
1350 if (sk->sk_destruct != NULL)
1351 (*sk->sk_destruct)(sk);
1352
1353 pppol2tp_tunnel_dec_refcount(tunnel);
1354
1355end:
1356 return;
1357}
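
/* Editor's note: pppol2tp_tunnel_destruct() above relies on a
 * destructor-chaining pattern; here it is in isolation, with a
 * hypothetical context struct. Save the original sk_destruct when
 * hooking, restore it during teardown, then call it so the underlying
 * UDP socket's own cleanup still runs.
 */
struct example_ctx {
	void (*old_sk_destruct)(struct sock *sk);
};

static void example_hooked_destruct(struct sock *sk)
{
	struct example_ctx *ctx = sk->sk_user_data;

	sk->sk_destruct = ctx->old_sk_destruct;	/* unhook ourselves */
	sk->sk_user_data = NULL;
	/* ... private teardown goes here ... */
	if (sk->sk_destruct)
		sk->sk_destruct(sk);		/* chain to the original */
}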
1358
1359/* Really kill the session socket. (Called from sock_put() if
1360 * refcnt == 0.)
1361 */
1362static void pppol2tp_session_destruct(struct sock *sk)
1363{
1364 struct pppol2tp_session *session = NULL;
1365
1366 if (sk->sk_user_data != NULL) {
1367 struct pppol2tp_tunnel *tunnel;
1368
1369 session = sk->sk_user_data;
1370 if (session == NULL)
1371 goto out;
1372
1373 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
1374
1375 /* Don't use pppol2tp_sock_to_tunnel() here to
1376 * get the tunnel context because the tunnel
1377 * socket might have already been closed (its
1378 * sk->sk_user_data will be NULL) so use the
1379 * session's private tunnel ptr instead.
1380 */
1381 tunnel = session->tunnel;
1382 if (tunnel != NULL) {
1383 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1384
1385 /* If session_id is zero, this is a null
1386 * session context, which was created for a
1387 * socket that is being used only to manage
1388 * tunnels.
1389 */
1390 if (session->tunnel_addr.s_session != 0) {
1391 /* Delete the session socket from the
1392 * hash
1393 */
1394 write_lock_bh(&tunnel->hlist_lock);
1395 hlist_del_init(&session->hlist);
1396 write_unlock_bh(&tunnel->hlist_lock);
1397
1398 atomic_dec(&pppol2tp_session_count);
1399 }
1400
1401 /* This will delete the tunnel context if this
1402 * is the last session on the tunnel.
1403 */
1404 session->tunnel = NULL;
1405 session->tunnel_sock = NULL;
1406 pppol2tp_tunnel_dec_refcount(tunnel);
1407 }
1408 }
1409
1410 kfree(session);
1411out:
1412 return;
1413}
1414
1415/* Called when the PPPoX socket (session) is closed.
1416 */
1417static int pppol2tp_release(struct socket *sock)
1418{
1419 struct sock *sk = sock->sk;
1420 struct pppol2tp_session *session;
1421 int error;
1422
1423 if (!sk)
1424 return 0;
1425
1426 error = -EBADF;
1427 lock_sock(sk);
1428 if (sock_flag(sk, SOCK_DEAD) != 0)
1429 goto error;
1430
1431 pppox_unbind_sock(sk);
1432
1433 /* Signal the death of the socket. */
1434 sk->sk_state = PPPOX_DEAD;
1435 sock_orphan(sk);
1436 sock->sk = NULL;
1437
1438 session = pppol2tp_sock_to_session(sk);
1439
1440 /* Purge any queued data */
1441 skb_queue_purge(&sk->sk_receive_queue);
1442 skb_queue_purge(&sk->sk_write_queue);
1443 if (session != NULL) {
1444 struct sk_buff *skb;
1445 while ((skb = skb_dequeue(&session->reorder_q))) {
1446 kfree_skb(skb);
1447 sock_put(sk);
1448 }
1449 sock_put(sk);
1450 }
1451
1452 release_sock(sk);
1453
1454 /* This will delete the session context via
1455 * pppol2tp_session_destruct() if the socket's refcnt drops to
1456 * zero.
1457 */
1458 sock_put(sk);
1459
1460 return 0;
1461
1462error:
1463 release_sock(sk);
1464 return error;
1465}
1466
1467/* Internal function to prepare a tunnel (UDP) socket to have PPPoX
1468 * sockets attached to it.
1469 */
1470static struct sock *pppol2tp_prepare_tunnel_socket(struct net *net,
1471 int fd, u16 tunnel_id, int *error)
1472{
1473 int err;
1474 struct socket *sock = NULL;
1475 struct sock *sk;
1476 struct pppol2tp_tunnel *tunnel;
1477 struct pppol2tp_net *pn;
1478 struct sock *ret = NULL;
1479
1480 /* Get the tunnel UDP socket from the fd, which was opened by
1481 * the userspace L2TP daemon.
1482 */
1483 err = -EBADF;
1484 sock = sockfd_lookup(fd, &err);
1485 if (!sock) {
1486 PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
1487 "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
1488 tunnel_id, fd, err);
1489 goto err;
1490 }
1491
1492 sk = sock->sk;
1493
1494 /* Quick sanity checks */
1495 err = -EPROTONOSUPPORT;
1496 if (sk->sk_protocol != IPPROTO_UDP) {
1497 PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
1498 "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1499 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1500 goto err;
1501 }
1502 err = -EAFNOSUPPORT;
1503 if (sock->ops->family != AF_INET) {
1504 PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_ERR,
1505 "tunl %hu: fd %d wrong family, got %d, expected %d\n",
1506 tunnel_id, fd, sock->ops->family, AF_INET);
1507 goto err;
1508 }
1509
1510 err = -ENOTCONN;
1511
1512 /* Check if this socket has already been prepped */
1513 tunnel = (struct pppol2tp_tunnel *)sk->sk_user_data;
1514 if (tunnel != NULL) {
1515 /* User-data field already set */
1516 err = -EBUSY;
1517 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1518
1519 /* This socket has already been prepped */
1520 ret = tunnel->sock;
1521 goto out;
1522 }
1523
1524 /* This socket is available and needs prepping. Create a new tunnel
1525 * context and init it.
1526 */
1527 sk->sk_user_data = tunnel = kzalloc(sizeof(struct pppol2tp_tunnel), GFP_KERNEL);
1528 if (sk->sk_user_data == NULL) {
1529 err = -ENOMEM;
1530 goto err;
1531 }
1532
1533 tunnel->magic = L2TP_TUNNEL_MAGIC;
1534 sprintf(&tunnel->name[0], "tunl %hu", tunnel_id);
1535
1536 tunnel->stats.tunnel_id = tunnel_id;
1537 tunnel->debug = PPPOL2TP_DEFAULT_DEBUG_FLAGS;
1538
1539 /* Hook on the tunnel socket destructor so that we can cleanup
1540 * if the tunnel socket goes away.
1541 */
1542 tunnel->old_sk_destruct = sk->sk_destruct;
1543 sk->sk_destruct = pppol2tp_tunnel_destruct;
1544
1545 tunnel->sock = sk;
1546 sk->sk_allocation = GFP_ATOMIC;
1547
1548 /* Misc init */
1549 rwlock_init(&tunnel->hlist_lock);
1550
1551 /* The net we belong to */
1552 tunnel->pppol2tp_net = net;
1553 pn = pppol2tp_pernet(net);
1554
1555 /* Add tunnel to our list */
1556 INIT_LIST_HEAD(&tunnel->list);
1557 write_lock_bh(&pn->pppol2tp_tunnel_list_lock);
1558 list_add(&tunnel->list, &pn->pppol2tp_tunnel_list);
1559 write_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
1560 atomic_inc(&pppol2tp_tunnel_count);
1561
1562 /* Bump the reference count. The tunnel context is deleted
1563 * only when this drops to zero.
1564 */
1565 pppol2tp_tunnel_inc_refcount(tunnel);
1566
1567 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1568 (udp_sk(sk))->encap_type = UDP_ENCAP_L2TPINUDP;
1569 (udp_sk(sk))->encap_rcv = pppol2tp_udp_encap_recv;
1570
1571 ret = tunnel->sock;
1572
1573 *error = 0;
1574out:
1575 if (sock)
1576 sockfd_put(sock);
1577
1578 return ret;
1579
1580err:
1581 *error = err;
1582 goto out;
1583}
1584
1585static struct proto pppol2tp_sk_proto = {
1586 .name = "PPPOL2TP",
1587 .owner = THIS_MODULE,
1588 .obj_size = sizeof(struct pppox_sock),
1589};
1590
1591/* socket() handler. Initialize a new struct sock.
1592 */
1593static int pppol2tp_create(struct net *net, struct socket *sock)
1594{
1595 int error = -ENOMEM;
1596 struct sock *sk;
1597
1598 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
1599 if (!sk)
1600 goto out;
1601
1602 sock_init_data(sock, sk);
1603
1604 sock->state = SS_UNCONNECTED;
1605 sock->ops = &pppol2tp_ops;
1606
1607 sk->sk_backlog_rcv = pppol2tp_recv_core;
1608 sk->sk_protocol = PX_PROTO_OL2TP;
1609 sk->sk_family = PF_PPPOX;
1610 sk->sk_state = PPPOX_NONE;
1611 sk->sk_type = SOCK_STREAM;
1612 sk->sk_destruct = pppol2tp_session_destruct;
1613
1614 error = 0;
1615
1616out:
1617 return error;
1618}
1619
1620/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
1621 */
1622static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
1623 int sockaddr_len, int flags)
1624{
1625 struct sock *sk = sock->sk;
1626 struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
1627 struct pppox_sock *po = pppox_sk(sk);
1628 struct sock *tunnel_sock = NULL;
1629 struct pppol2tp_session *session = NULL;
1630 struct pppol2tp_tunnel *tunnel;
1631 struct dst_entry *dst;
1632 int error = 0;
1633
1634 lock_sock(sk);
1635
1636 error = -EINVAL;
1637 if (sp->sa_protocol != PX_PROTO_OL2TP)
1638 goto end;
1639
1640 /* Check for already bound sockets */
1641 error = -EBUSY;
1642 if (sk->sk_state & PPPOX_CONNECTED)
1643 goto end;
1644
1645 /* We don't support rebinding anyway */
1646 error = -EALREADY;
1647 if (sk->sk_user_data)
1648 goto end; /* socket is already attached */
1649
1650 /* Don't bind if s_tunnel is 0 */
1651 error = -EINVAL;
1652 if (sp->pppol2tp.s_tunnel == 0)
1653 goto end;
1654
1655 /* Special case: prepare tunnel socket if s_session and
1656 * d_session are both 0. Otherwise look up the tunnel using the
1657 * tunnel id.
1658 */
1659 if ((sp->pppol2tp.s_session == 0) && (sp->pppol2tp.d_session == 0)) {
1660 tunnel_sock = pppol2tp_prepare_tunnel_socket(sock_net(sk),
1661 sp->pppol2tp.fd,
1662 sp->pppol2tp.s_tunnel,
1663 &error);
1664 if (tunnel_sock == NULL)
1665 goto end;
1666
1667 sock_hold(tunnel_sock);
1668 tunnel = tunnel_sock->sk_user_data;
1669 } else {
1670 tunnel = pppol2tp_tunnel_find(sock_net(sk), sp->pppol2tp.s_tunnel);
1671
1672 /* Error if we can't find the tunnel */
1673 error = -ENOENT;
1674 if (tunnel == NULL)
1675 goto end;
1676
1677 tunnel_sock = tunnel->sock;
1678 }
1679
1680 /* Check that this session doesn't already exist */
1681 error = -EEXIST;
1682 session = pppol2tp_session_find(tunnel, sp->pppol2tp.s_session);
1683 if (session != NULL)
1684 goto end;
1685
1686 /* Allocate and initialize a new session context. */
1687 session = kzalloc(sizeof(struct pppol2tp_session), GFP_KERNEL);
1688 if (session == NULL) {
1689 error = -ENOMEM;
1690 goto end;
1691 }
1692
1693 skb_queue_head_init(&session->reorder_q);
1694
1695 session->magic = L2TP_SESSION_MAGIC;
1696 session->owner = current->pid;
1697 session->sock = sk;
1698 session->tunnel = tunnel;
1699 session->tunnel_sock = tunnel_sock;
1700 session->tunnel_addr = sp->pppol2tp;
1701 sprintf(&session->name[0], "sess %hu/%hu",
1702 session->tunnel_addr.s_tunnel,
1703 session->tunnel_addr.s_session);
1704
1705 session->stats.tunnel_id = session->tunnel_addr.s_tunnel;
1706 session->stats.session_id = session->tunnel_addr.s_session;
1707
1708 INIT_HLIST_NODE(&session->hlist);
1709
1710 /* Inherit debug options from tunnel */
1711 session->debug = tunnel->debug;
1712
1713 /* Default MTU must allow space for UDP/L2TP/PPP
1714 * headers.
1715 */
1716 session->mtu = session->mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
1717
1718 /* If PMTU discovery was enabled, use the MTU that was discovered */
1719 dst = sk_dst_get(sk);
1720 if (dst != NULL) {
1721 u32 pmtu = dst_mtu(__sk_dst_get(sk));
1722 if (pmtu != 0)
1723 session->mtu = session->mru = pmtu -
1724 PPPOL2TP_HEADER_OVERHEAD;
1725 dst_release(dst);
1726 }
1727
1728 /* Special case: if source & dest session_id == 0x0000, this socket is
1729 * being created to manage the tunnel. Don't add the session to the
1730 * session hash list, just set up the internal context for use by
1731 * ioctl() and sockopt() handlers.
1732 */
1733 if ((session->tunnel_addr.s_session == 0) &&
1734 (session->tunnel_addr.d_session == 0)) {
1735 error = 0;
1736 sk->sk_user_data = session;
1737 goto out_no_ppp;
1738 }
1739
1740 /* Get tunnel context from the tunnel socket */
1741 tunnel = pppol2tp_sock_to_tunnel(tunnel_sock);
1742 if (tunnel == NULL) {
1743 error = -EBADF;
1744 goto end;
1745 }
1746
1747 /* Right now, because we don't have a way to push the incoming skb's
1748 * straight through the UDP layer, the only header we need to worry
1749 * about is the L2TP header. This size is different depending on
1750 * whether sequence numbers are enabled for the data channel.
1751 */
1752 po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
1753
1754 po->chan.private = sk;
1755 po->chan.ops = &pppol2tp_chan_ops;
1756 po->chan.mtu = session->mtu;
1757
1758 error = ppp_register_net_channel(sock_net(sk), &po->chan);
1759 if (error)
1760 goto end_put_tun;
1761
1762 /* This is how we get the session context from the socket. */
1763 sk->sk_user_data = session;
1764
1765 /* Add session to the tunnel's hash list */
1766 write_lock_bh(&tunnel->hlist_lock);
1767 hlist_add_head(&session->hlist,
1768 pppol2tp_session_id_hash(tunnel,
1769 session->tunnel_addr.s_session));
1770 write_unlock_bh(&tunnel->hlist_lock);
1771
1772 atomic_inc(&pppol2tp_session_count);
1773
1774out_no_ppp:
1775 pppol2tp_tunnel_inc_refcount(tunnel);
1776 sk->sk_state = PPPOX_CONNECTED;
1777 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1778 "%s: created\n", session->name);
1779
1780end_put_tun:
1781 sock_put(tunnel_sock);
1782end:
1783 release_sock(sk);
1784
1785 if (error != 0) {
1786 if (session)
1787 PRINTK(session->debug,
1788 PPPOL2TP_MSG_CONTROL, KERN_WARNING,
1789 "%s: connect failed: %d\n",
1790 session->name, error);
1791 else
1792 PRINTK(-1, PPPOL2TP_MSG_CONTROL, KERN_WARNING,
1793 "connect failed: %d\n", error);
1794 }
1795
1796 return error;
1797}
1798
1799/* getname() support.
1800 */
1801static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
1802 int *usockaddr_len, int peer)
1803{
1804 int len = sizeof(struct sockaddr_pppol2tp);
1805 struct sockaddr_pppol2tp sp;
1806 int error = 0;
1807 struct pppol2tp_session *session;
1808
1809 error = -ENOTCONN;
1810 if (sock->sk->sk_state != PPPOX_CONNECTED)
1811 goto end;
1812
1813 session = pppol2tp_sock_to_session(sock->sk);
1814 if (session == NULL) {
1815 error = -EBADF;
1816 goto end;
1817 }
1818
1819 sp.sa_family = AF_PPPOX;
1820 sp.sa_protocol = PX_PROTO_OL2TP;
1821 memcpy(&sp.pppol2tp, &session->tunnel_addr,
1822 sizeof(struct pppol2tp_addr));
1823
1824 memcpy(uaddr, &sp, len);
1825
1826 *usockaddr_len = len;
1827
1828 error = 0;
1829 sock_put(sock->sk);
1830
1831end:
1832 return error;
1833}
1834
1835/****************************************************************************
1836 * ioctl() handlers.
1837 *
1838 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
1839 * sockets. However, in order to control kernel tunnel features, we allow
1840 * userspace to create a special "tunnel" PPPoX socket which is used for
1841 * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
1842 * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
1843 * calls.
1844 ****************************************************************************/
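
/* Editor's note: a hedged userspace sketch of the special "tunnel"
 * PPPoX socket described above. Connecting with s_session == 0 and
 * d_session == 0 attaches the socket to the tunnel only, making it
 * usable for the tunnel-level ioctl()/sockopt() calls that follow.
 * Error handling is omitted; tunnel_fd must be the daemon's connected
 * L2TP UDP socket and the tunnel ids are placeholders.
 *
 *	struct sockaddr_pppol2tp sax;
 *	int tunsock;
 *
 *	memset(&sax, 0, sizeof(sax));
 *	sax.sa_family = AF_PPPOX;
 *	sax.sa_protocol = PX_PROTO_OL2TP;
 *	sax.pppol2tp.fd = tunnel_fd;
 *	sax.pppol2tp.s_tunnel = 1;	// local tunnel id
 *	sax.pppol2tp.d_tunnel = 2;	// peer tunnel id
 *	sax.pppol2tp.s_session = 0;	// 0/0 => tunnel socket
 *	sax.pppol2tp.d_session = 0;
 *
 *	tunsock = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
 *	connect(tunsock, (struct sockaddr *)&sax, sizeof(sax));
 */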
1845
1846/* Session ioctl helper.
1847 */
1848static int pppol2tp_session_ioctl(struct pppol2tp_session *session,
1849 unsigned int cmd, unsigned long arg)
1850{
1851 struct ifreq ifr;
1852 int err = 0;
1853 struct sock *sk = session->sock;
1854 int val = (int) arg;
1855
1856 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
1857 "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
1858 session->name, cmd, arg);
1859
1860 sock_hold(sk);
1861
1862 switch (cmd) {
1863 case SIOCGIFMTU:
1864 err = -ENXIO;
1865 if (!(sk->sk_state & PPPOX_CONNECTED))
1866 break;
1867
1868 err = -EFAULT;
1869 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1870 break;
1871 ifr.ifr_mtu = session->mtu;
1872 if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
1873 break;
1874
1875 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1876 "%s: get mtu=%d\n", session->name, session->mtu);
1877 err = 0;
1878 break;
1879
1880 case SIOCSIFMTU:
1881 err = -ENXIO;
1882 if (!(sk->sk_state & PPPOX_CONNECTED))
1883 break;
1884
1885 err = -EFAULT;
1886 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1887 break;
1888
1889 session->mtu = ifr.ifr_mtu;
1890
1891 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1892 "%s: set mtu=%d\n", session->name, session->mtu);
1893 err = 0;
1894 break;
1895
1896 case PPPIOCGMRU:
1897 err = -ENXIO;
1898 if (!(sk->sk_state & PPPOX_CONNECTED))
1899 break;
1900
1901 err = -EFAULT;
1902 if (put_user(session->mru, (int __user *) arg))
1903 break;
1904
1905 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1906 "%s: get mru=%d\n", session->name, session->mru);
1907 err = 0;
1908 break;
1909
1910 case PPPIOCSMRU:
1911 err = -ENXIO;
1912 if (!(sk->sk_state & PPPOX_CONNECTED))
1913 break;
1914
1915 err = -EFAULT;
1916 if (get_user(val, (int __user *) arg))
1917 break;
1918
1919 session->mru = val;
1920 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1921 "%s: set mru=%d\n", session->name, session->mru);
1922 err = 0;
1923 break;
1924
1925 case PPPIOCGFLAGS:
1926 err = -EFAULT;
1927 if (put_user(session->flags, (int __user *) arg))
1928 break;
1929
1930 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1931 "%s: get flags=%d\n", session->name, session->flags);
1932 err = 0;
1933 break;
1934
1935 case PPPIOCSFLAGS:
1936 err = -EFAULT;
1937 if (get_user(val, (int __user *) arg))
1938 break;
1939 session->flags = val;
1940 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1941 "%s: set flags=%d\n", session->name, session->flags);
1942 err = 0;
1943 break;
1944
1945 case PPPIOCGL2TPSTATS:
1946 err = -ENXIO;
1947 if (!(sk->sk_state & PPPOX_CONNECTED))
1948 break;
1949
1950 if (copy_to_user((void __user *) arg, &session->stats,
1951 sizeof(session->stats)))
1952 break;
1953 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1954 "%s: get L2TP stats\n", session->name);
1955 err = 0;
1956 break;
1957
1958 default:
1959 err = -ENOSYS;
1960 break;
1961 }
1962
1963 sock_put(sk);
1964
1965 return err;
1966}
1967
1968/* Tunnel ioctl helper.
1969 *
1970 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
1971 * specifies a session_id, the session ioctl handler is called. This allows an
1972 * application to retrieve session stats via a tunnel socket.
1973 */
1974static int pppol2tp_tunnel_ioctl(struct pppol2tp_tunnel *tunnel,
1975 unsigned int cmd, unsigned long arg)
1976{
1977 int err = 0;
1978 struct sock *sk = tunnel->sock;
1979 struct pppol2tp_ioc_stats stats_req;
1980
1981 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
1982 "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n", tunnel->name,
1983 cmd, arg);
1984
1985 sock_hold(sk);
1986
1987 switch (cmd) {
1988 case PPPIOCGL2TPSTATS:
1989 err = -ENXIO;
1990 if (!(sk->sk_state & PPPOX_CONNECTED))
1991 break;
1992
1993 if (copy_from_user(&stats_req, (void __user *) arg,
1994 sizeof(stats_req))) {
1995 err = -EFAULT;
1996 break;
1997 }
1998 if (stats_req.session_id != 0) {
1999 /* resend to session ioctl handler */
2000 struct pppol2tp_session *session =
2001 pppol2tp_session_find(tunnel, stats_req.session_id);
2002 if (session != NULL)
2003 err = pppol2tp_session_ioctl(session, cmd, arg);
2004 else
2005 err = -EBADR;
2006 break;
2007 }
2008#ifdef CONFIG_XFRM
2009 tunnel->stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
2010#endif
2011 if (copy_to_user((void __user *) arg, &tunnel->stats,
2012 sizeof(tunnel->stats))) {
2013 err = -EFAULT;
2014 break;
2015 }
2016 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2017 "%s: get L2TP stats\n", tunnel->name);
2018 err = 0;
2019 break;
2020
2021 default:
2022 err = -ENOSYS;
2023 break;
2024 }
2025
2026 sock_put(sk);
2027
2028 return err;
2029}
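
/* Editor's note: a hedged userspace sketch of the PPPIOCGL2TPSTATS
 * redirection handled above: session stats fetched through a tunnel
 * control socket by filling in session_id first. The struct comes
 * from <linux/if_pppol2tp.h>; tunsock and the session id are
 * placeholders.
 *
 *	struct pppol2tp_ioc_stats stats;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	stats.session_id = 42;		// 0 would return tunnel stats
 *	if (ioctl(tunsock, PPPIOCGL2TPSTATS, &stats) == 0)
 *		printf("tx %llu rx %llu packets\n",
 *		       (unsigned long long)stats.tx_packets,
 *		       (unsigned long long)stats.rx_packets);
 */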
2030
2031/* Main ioctl() handler.
2032 * Dispatch to tunnel or session helpers depending on the socket.
2033 */
2034static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
2035 unsigned long arg)
2036{
2037 struct sock *sk = sock->sk;
2038 struct pppol2tp_session *session;
2039 struct pppol2tp_tunnel *tunnel;
2040 int err;
2041
2042 if (!sk)
2043 return 0;
2044
2045 err = -EBADF;
2046 if (sock_flag(sk, SOCK_DEAD) != 0)
2047 goto end;
2048
2049 err = -ENOTCONN;
2050 if ((sk->sk_user_data == NULL) ||
2051 (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
2052 goto end;
2053
2054 /* Get session context from the socket */
2055 err = -EBADF;
2056 session = pppol2tp_sock_to_session(sk);
2057 if (session == NULL)
2058 goto end;
2059
2060 /* Special case: if session's session_id is zero, treat ioctl as a
2061 * tunnel ioctl
2062 */
2063 if ((session->tunnel_addr.s_session == 0) &&
2064 (session->tunnel_addr.d_session == 0)) {
2065 err = -EBADF;
2066 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2067 if (tunnel == NULL)
2068 goto end_put_sess;
2069
2070 err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
2071 sock_put(session->tunnel_sock);
2072 goto end_put_sess;
2073 }
2074
2075 err = pppol2tp_session_ioctl(session, cmd, arg);
2076
2077end_put_sess:
2078 sock_put(sk);
2079end:
2080 return err;
2081}
2082
2083/*****************************************************************************
2084 * setsockopt() / getsockopt() support.
2085 *
2086 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
2087 * sockets. In order to control kernel tunnel features, we allow userspace to
2088 * create a special "tunnel" PPPoX socket which is used for control only.
2089 * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
2090 * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
2091 *****************************************************************************/
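
/* Editor's note: from userspace these options are plain setsockopt()/
 * getsockopt() calls at level SOL_PPPOL2TP (273; define it if your
 * headers lack it) on a connected PPPoL2TP socket. A hedged sketch,
 * with sess_fd as a placeholder:
 *
 *	int on = 1;
 *
 *	setsockopt(sess_fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ,
 *		   &on, sizeof(on));	// start sending sequence numbers
 */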
2092
2093/* Tunnel setsockopt() helper.
2094 */
2095static int pppol2tp_tunnel_setsockopt(struct sock *sk,
2096 struct pppol2tp_tunnel *tunnel,
2097 int optname, int val)
2098{
2099 int err = 0;
2100
2101 switch (optname) {
2102 case PPPOL2TP_SO_DEBUG:
2103 tunnel->debug = val;
2104 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2105 "%s: set debug=%x\n", tunnel->name, tunnel->debug);
2106 break;
2107
2108 default:
2109 err = -ENOPROTOOPT;
2110 break;
2111 }
2112
2113 return err;
2114}
2115
2116/* Session setsockopt helper.
2117 */
2118static int pppol2tp_session_setsockopt(struct sock *sk,
2119 struct pppol2tp_session *session,
2120 int optname, int val)
2121{
2122 int err = 0;
2123
2124 switch (optname) {
2125 case PPPOL2TP_SO_RECVSEQ:
2126 if ((val != 0) && (val != 1)) {
2127 err = -EINVAL;
2128 break;
2129 }
2130 session->recv_seq = val ? -1 : 0;
2131 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2132 "%s: set recv_seq=%d\n", session->name,
2133 session->recv_seq);
2134 break;
2135
2136 case PPPOL2TP_SO_SENDSEQ:
2137 if ((val != 0) && (val != 1)) {
2138 err = -EINVAL;
2139 break;
2140 }
2141 session->send_seq = val ? -1 : 0;
2142 {
2143 struct sock *ssk = session->sock;
2144 struct pppox_sock *po = pppox_sk(ssk);
2145 po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
2146 PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
2147 }
2148 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2149 "%s: set send_seq=%d\n", session->name, session->send_seq);
2150 break;
2151
2152 case PPPOL2TP_SO_LNSMODE:
2153 if ((val != 0) && (val != 1)) {
2154 err = -EINVAL;
2155 break;
2156 }
2157 session->lns_mode = val ? -1 : 0;
2158 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2159 "%s: set lns_mode=%d\n", session->name,
2160 session->lns_mode);
2161 break;
2162
2163 case PPPOL2TP_SO_DEBUG:
2164 session->debug = val;
2165 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2166 "%s: set debug=%x\n", session->name, session->debug);
2167 break;
2168
2169 case PPPOL2TP_SO_REORDERTO:
2170 session->reorder_timeout = msecs_to_jiffies(val);
2171 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2172 "%s: set reorder_timeout=%d\n", session->name,
2173 session->reorder_timeout);
2174 break;
2175
2176 default:
2177 err = -ENOPROTOOPT;
2178 break;
2179 }
2180
2181 return err;
2182}
2183
2184/* Main setsockopt() entry point.
2185 * Does API checks, then calls either the tunnel or session setsockopt
2186 * handler, according to whether the PPPoL2TP socket is for a regular
2187 * session or the special tunnel type.
2188 */
2189static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
2190 char __user *optval, unsigned int optlen)
2191{
2192 struct sock *sk = sock->sk;
2193 struct pppol2tp_session *session = sk->sk_user_data;
2194 struct pppol2tp_tunnel *tunnel;
2195 int val;
2196 int err;
2197
2198 if (level != SOL_PPPOL2TP)
2199 return udp_prot.setsockopt(sk, level, optname, optval, optlen);
2200
2201 if (optlen < sizeof(int))
2202 return -EINVAL;
2203
2204 if (get_user(val, (int __user *)optval))
2205 return -EFAULT;
2206
2207 err = -ENOTCONN;
2208 if (sk->sk_user_data == NULL)
2209 goto end;
2210
2211 /* Get session context from the socket */
2212 err = -EBADF;
2213 session = pppol2tp_sock_to_session(sk);
2214 if (session == NULL)
2215 goto end;
2216
2217 /* Special case: if session_id == 0x0000, treat as operation on tunnel
2218 */
2219 if ((session->tunnel_addr.s_session == 0) &&
2220 (session->tunnel_addr.d_session == 0)) {
2221 err = -EBADF;
2222 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2223 if (tunnel == NULL)
2224 goto end_put_sess;
2225
2226 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
2227 sock_put(session->tunnel_sock);
2228 } else
2229 err = pppol2tp_session_setsockopt(sk, session, optname, val);
2230
2233end_put_sess:
2234 sock_put(sk);
2235end:
2236 return err;
2237}
2238
2239/* Tunnel getsockopt helper. Called with sock locked.
2240 */
2241static int pppol2tp_tunnel_getsockopt(struct sock *sk,
2242 struct pppol2tp_tunnel *tunnel,
2243 int optname, int *val)
2244{
2245 int err = 0;
2246
2247 switch (optname) {
2248 case PPPOL2TP_SO_DEBUG:
2249 *val = tunnel->debug;
2250 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2251 "%s: get debug=%x\n", tunnel->name, tunnel->debug);
2252 break;
2253
2254 default:
2255 err = -ENOPROTOOPT;
2256 break;
2257 }
2258
2259 return err;
2260}
2261
2262/* Session getsockopt helper. Called with sock locked.
2263 */
2264static int pppol2tp_session_getsockopt(struct sock *sk,
2265 struct pppol2tp_session *session,
2266 int optname, int *val)
2267{
2268 int err = 0;
2269
2270 switch (optname) {
2271 case PPPOL2TP_SO_RECVSEQ:
2272 *val = session->recv_seq;
2273 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2274 "%s: get recv_seq=%d\n", session->name, *val);
2275 break;
2276
2277 case PPPOL2TP_SO_SENDSEQ:
2278 *val = session->send_seq;
2279 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2280 "%s: get send_seq=%d\n", session->name, *val);
2281 break;
2282
2283 case PPPOL2TP_SO_LNSMODE:
2284 *val = session->lns_mode;
2285 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2286 "%s: get lns_mode=%d\n", session->name, *val);
2287 break;
2288
2289 case PPPOL2TP_SO_DEBUG:
2290 *val = session->debug;
2291 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2292 "%s: get debug=%d\n", session->name, *val);
2293 break;
2294
2295 case PPPOL2TP_SO_REORDERTO:
2296 *val = (int) jiffies_to_msecs(session->reorder_timeout);
2297 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
2298 "%s: get reorder_timeout=%d\n", session->name, *val);
2299 break;
2300
2301 default:
2302 err = -ENOPROTOOPT;
2303 }
2304
2305 return err;
2306}
2307
2308/* Main getsockopt() entry point.
2309 * Does API checks, then calls either the tunnel or session getsockopt
2310 * handler, according to whether the PPPoX socket is for a regular session
2311 * or the special tunnel type.
2312 */
2313static int pppol2tp_getsockopt(struct socket *sock, int level,
2314 int optname, char __user *optval, int __user *optlen)
2315{
2316 struct sock *sk = sock->sk;
2317 struct pppol2tp_session *session = sk->sk_user_data;
2318 struct pppol2tp_tunnel *tunnel;
2319 int val, len;
2320 int err;
2321
2322 if (level != SOL_PPPOL2TP)
2323 return udp_prot.getsockopt(sk, level, optname, optval, optlen);
2324
2325 if (get_user(len, (int __user *) optlen))
2326 return -EFAULT;
2327
2328 if (len < 0)
2329 return -EINVAL;
2330
2331 len = min_t(unsigned int, len, sizeof(int));
2332
2333 err = -ENOTCONN;
2334 if (sk->sk_user_data == NULL)
2335 goto end;
2336
2337 /* Get the session context */
2338 err = -EBADF;
2339 session = pppol2tp_sock_to_session(sk);
2340 if (session == NULL)
2341 goto end;
2342
2343 /* Special case: if session_id == 0x0000, treat as operation on tunnel */
2344 if ((session->tunnel_addr.s_session == 0) &&
2345 (session->tunnel_addr.d_session == 0)) {
2346 err = -EBADF;
2347 tunnel = pppol2tp_sock_to_tunnel(session->tunnel_sock);
2348 if (tunnel == NULL)
2349 goto end_put_sess;
2350
2351 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
2352 sock_put(session->tunnel_sock);
2353 } else
2354 err = pppol2tp_session_getsockopt(sk, session, optname, &val);
2355
2356 err = -EFAULT;
2357 if (put_user(len, (int __user *) optlen))
2358 goto end_put_sess;
2359
2360 if (copy_to_user((void __user *) optval, &val, len))
2361 goto end_put_sess;
2362
2363 err = 0;
2364
2365end_put_sess:
2366 sock_put(sk);
2367end:
2368 return err;
2369}
2370
2371/*****************************************************************************
2372 * /proc filesystem for debug
2373 *****************************************************************************/
2374
2375#ifdef CONFIG_PROC_FS
2376
2377#include <linux/seq_file.h>
2378
2379struct pppol2tp_seq_data {
2380 struct seq_net_private p;
2381 struct pppol2tp_tunnel *tunnel; /* current tunnel */
2382 struct pppol2tp_session *session; /* NULL means get first session in tunnel */
2383};
2384
2385static struct pppol2tp_session *next_session(struct pppol2tp_tunnel *tunnel, struct pppol2tp_session *curr)
2386{
2387 struct pppol2tp_session *session = NULL;
2388 struct hlist_node *walk;
2389 int found = 0;
2390 int next = 0;
2391 int i;
2392
2393 read_lock_bh(&tunnel->hlist_lock);
2394 for (i = 0; i < PPPOL2TP_HASH_SIZE; i++) {
2395 hlist_for_each_entry(session, walk, &tunnel->session_hlist[i], hlist) {
2396 if (curr == NULL) {
2397 found = 1;
2398 goto out;
2399 }
2400 if (session == curr) {
2401 next = 1;
2402 continue;
2403 }
2404 if (next) {
2405 found = 1;
2406 goto out;
2407 }
2408 }
2409 }
2410out:
2411 read_unlock_bh(&tunnel->hlist_lock);
2412 if (!found)
2413 session = NULL;
2414
2415 return session;
2416}
2417
2418static struct pppol2tp_tunnel *next_tunnel(struct pppol2tp_net *pn,
2419 struct pppol2tp_tunnel *curr)
2420{
2421 struct pppol2tp_tunnel *tunnel = NULL;
2422
2423 read_lock_bh(&pn->pppol2tp_tunnel_list_lock);
2424 if (list_is_last(&curr->list, &pn->pppol2tp_tunnel_list)) {
2425 goto out;
2426 }
2427 tunnel = list_entry(curr->list.next, struct pppol2tp_tunnel, list);
2428out:
2429 read_unlock_bh(&pn->pppol2tp_tunnel_list_lock);
2430
2431 return tunnel;
2432}
2433
2434static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
2435{
2436 struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
2437 struct pppol2tp_net *pn;
2438 loff_t pos = *offs;
2439
2440 if (!pos)
2441 goto out;
2442
2443 BUG_ON(m->private == NULL);
2444 pd = m->private;
2445 pn = pppol2tp_pernet(seq_file_net(m));
2446
2447 if (pd->tunnel == NULL) {
2448 if (!list_empty(&pn->pppol2tp_tunnel_list))
2449 pd->tunnel = list_entry(pn->pppol2tp_tunnel_list.next, struct pppol2tp_tunnel, list);
2450 } else {
2451 pd->session = next_session(pd->tunnel, pd->session);
2452 if (pd->session == NULL) {
2453 pd->tunnel = next_tunnel(pn, pd->tunnel);
2454 }
2455 }
2456
2457 /* NULL tunnel and session indicates end of list */
2458 if ((pd->tunnel == NULL) && (pd->session == NULL))
2459 pd = NULL;
2460
2461out:
2462 return pd;
2463}
2464
2465static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
2466{
2467 (*pos)++;
2468 return NULL;
2469}
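
/* Editor's note: the iterator state lives in the seq_file private
 * data and pppol2tp_seq_start() advances it on every call, so
 * seq_next() only bumps *pos and returns NULL; the seq_file core then
 * re-invokes start() for the following record.
 */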
2470
2471static void pppol2tp_seq_stop(struct seq_file *p, void *v)
2472{
2473 /* nothing to do */
2474}
2475
2476static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
2477{
2478 struct pppol2tp_tunnel *tunnel = v;
2479
2480 seq_printf(m, "\nTUNNEL '%s', %c %d\n",
2481 tunnel->name,
2482 (tunnel == tunnel->sock->sk_user_data) ? 'Y':'N',
2483 atomic_read(&tunnel->ref_count) - 1);
2484 seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
2485 tunnel->debug,
2486 (unsigned long long)tunnel->stats.tx_packets,
2487 (unsigned long long)tunnel->stats.tx_bytes,
2488 (unsigned long long)tunnel->stats.tx_errors,
2489 (unsigned long long)tunnel->stats.rx_packets,
2490 (unsigned long long)tunnel->stats.rx_bytes,
2491 (unsigned long long)tunnel->stats.rx_errors);
2492}
2493
2494static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
2495{
2496 struct pppol2tp_session *session = v;
2497
2498 seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
2499 "%04X/%04X %d %c\n",
2500 session->name,
2501 ntohl(session->tunnel_addr.addr.sin_addr.s_addr),
2502 ntohs(session->tunnel_addr.addr.sin_port),
2503 session->tunnel_addr.s_tunnel,
2504 session->tunnel_addr.s_session,
2505 session->tunnel_addr.d_tunnel,
2506 session->tunnel_addr.d_session,
2507 session->sock->sk_state,
2508 (session == session->sock->sk_user_data) ?
2509 'Y' : 'N');
2510 seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
2511 session->mtu, session->mru,
2512 session->recv_seq ? 'R' : '-',
2513 session->send_seq ? 'S' : '-',
2514 session->lns_mode ? "LNS" : "LAC",
2515 session->debug,
2516 jiffies_to_msecs(session->reorder_timeout));
2517 seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
2518 session->nr, session->ns,
2519 (unsigned long long)session->stats.tx_packets,
2520 (unsigned long long)session->stats.tx_bytes,
2521 (unsigned long long)session->stats.tx_errors,
2522 (unsigned long long)session->stats.rx_packets,
2523 (unsigned long long)session->stats.rx_bytes,
2524 (unsigned long long)session->stats.rx_errors);
2525}
2526
2527static int pppol2tp_seq_show(struct seq_file *m, void *v)
2528{
2529 struct pppol2tp_seq_data *pd = v;
2530
2531 /* display header on line 1 */
2532 if (v == SEQ_START_TOKEN) {
2533 seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
2534 seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
2535 seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
2536 seq_puts(m, " SESSION name, addr/port src-tid/sid "
2537 "dest-tid/sid state user-data-ok\n");
2538 seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
2539 seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
2540 goto out;
2541 }
2542
2543 /* Show the tunnel or session context.
2544 */
2545 if (pd->session == NULL)
2546 pppol2tp_seq_tunnel_show(m, pd->tunnel);
2547 else
2548 pppol2tp_seq_session_show(m, pd->session);
2549
2550out:
2551 return 0;
2552}
2553
2554static const struct seq_operations pppol2tp_seq_ops = {
2555 .start = pppol2tp_seq_start,
2556 .next = pppol2tp_seq_next,
2557 .stop = pppol2tp_seq_stop,
2558 .show = pppol2tp_seq_show,
2559};
2560
2561/* Called when our /proc file is opened. We allocate data for use when
2562 * iterating our tunnel / session contexts and store it in the private
2563 * data of the seq_file.
2564 */
2565static int pppol2tp_proc_open(struct inode *inode, struct file *file)
2566{
2567 return seq_open_net(inode, file, &pppol2tp_seq_ops,
2568 sizeof(struct pppol2tp_seq_data));
2569}
2570
2571static const struct file_operations pppol2tp_proc_fops = {
2572 .owner = THIS_MODULE,
2573 .open = pppol2tp_proc_open,
2574 .read = seq_read,
2575 .llseek = seq_lseek,
2576 .release = seq_release_net,
2577};
2578
2579#endif /* CONFIG_PROC_FS */
2580
2581/*****************************************************************************
2582 * Init and cleanup
2583 *****************************************************************************/
2584
2585static const struct proto_ops pppol2tp_ops = {
2586 .family = AF_PPPOX,
2587 .owner = THIS_MODULE,
2588 .release = pppol2tp_release,
2589 .bind = sock_no_bind,
2590 .connect = pppol2tp_connect,
2591 .socketpair = sock_no_socketpair,
2592 .accept = sock_no_accept,
2593 .getname = pppol2tp_getname,
2594 .poll = datagram_poll,
2595 .listen = sock_no_listen,
2596 .shutdown = sock_no_shutdown,
2597 .setsockopt = pppol2tp_setsockopt,
2598 .getsockopt = pppol2tp_getsockopt,
2599 .sendmsg = pppol2tp_sendmsg,
2600 .recvmsg = pppol2tp_recvmsg,
2601 .mmap = sock_no_mmap,
2602 .ioctl = pppox_ioctl,
2603};
2604
2605static struct pppox_proto pppol2tp_proto = {
2606 .create = pppol2tp_create,
2607 .ioctl = pppol2tp_ioctl
2608};
2609
2610static __net_init int pppol2tp_init_net(struct net *net)
2611{
2612 struct pppol2tp_net *pn = pppol2tp_pernet(net);
2613 struct proc_dir_entry *pde;
2614
2615 INIT_LIST_HEAD(&pn->pppol2tp_tunnel_list);
2616 rwlock_init(&pn->pppol2tp_tunnel_list_lock);
2617
2618 pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
2619#ifdef CONFIG_PROC_FS
2620 if (!pde)
2621 return -ENOMEM;
2622#endif
2623
2624 return 0;
2625}
2626
2627static __net_exit void pppol2tp_exit_net(struct net *net)
2628{
2629 proc_net_remove(net, "pppol2tp");
2630}
2631
2632static struct pernet_operations pppol2tp_net_ops = {
2633 .init = pppol2tp_init_net,
2634 .exit = pppol2tp_exit_net,
2635 .id = &pppol2tp_net_id,
2636 .size = sizeof(struct pppol2tp_net),
2637};
2638
2639static int __init pppol2tp_init(void)
2640{
2641 int err;
2642
2643 err = proto_register(&pppol2tp_sk_proto, 0);
2644 if (err)
2645 goto out;
2646 err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
2647 if (err)
2648 goto out_unregister_pppol2tp_proto;
2649
2650 err = register_pernet_device(&pppol2tp_net_ops);
2651 if (err)
2652 goto out_unregister_pppox_proto;
2653
2654 printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
2655 PPPOL2TP_DRV_VERSION);
2656
2657out:
2658 return err;
2659out_unregister_pppox_proto:
2660 unregister_pppox_proto(PX_PROTO_OL2TP);
2661out_unregister_pppol2tp_proto:
2662 proto_unregister(&pppol2tp_sk_proto);
2663 goto out;
2664}
2665
2666static void __exit pppol2tp_exit(void)
2667{
2668 unregister_pppox_proto(PX_PROTO_OL2TP);
2669 unregister_pernet_device(&pppol2tp_net_ops);
2670 proto_unregister(&pppol2tp_sk_proto);
2671}
2672
2673module_init(pppol2tp_init);
2674module_exit(pppol2tp_exit);
2675
2676MODULE_AUTHOR("Martijn van Oosterhout <kleptog@svana.org>, "
2677 "James Chapman <jchapman@katalix.com>");
2678MODULE_DESCRIPTION("PPP over L2TP over UDP");
2679MODULE_LICENSE("GPL");
2680MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 5bf229bb34c2..022317db4673 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -327,7 +327,7 @@ static int gelic_descr_prepare_rx(struct gelic_card *card,
 	unsigned int bufsize;
 
 	if (gelic_descr_get_status(descr) != GELIC_DESCR_DMA_NOT_IN_USE)
-		dev_info(ctodev(card), "%s: ERROR status \n", __func__);
+		dev_info(ctodev(card), "%s: ERROR status\n", __func__);
 	/* we need to round up the buffer size to a multiple of 128 */
 	bufsize = ALIGN(GELIC_NET_MAX_MTU, GELIC_NET_RXBUF_ALIGN);
 
@@ -547,7 +547,7 @@ out:
 void gelic_net_set_multi(struct net_device *netdev)
 {
 	struct gelic_card *card = netdev_card(netdev);
-	struct dev_mc_list *mc;
+	struct netdev_hw_addr *ha;
 	unsigned int i;
 	uint8_t *p;
 	u64 addr;
@@ -581,9 +581,9 @@ void gelic_net_set_multi(struct net_device *netdev)
 	}
 
 	/* set multicast addresses */
-	netdev_for_each_mc_addr(mc, netdev) {
+	netdev_for_each_mc_addr(ha, netdev) {
 		addr = 0;
-		p = mc->dmi_addr;
+		p = ha->addr;
 		for (i = 0; i < ETH_ALEN; i++) {
 			addr <<= 8;
 			addr |= *p++;
@@ -1435,7 +1435,7 @@ static void gelic_net_tx_timeout_task(struct work_struct *work)
 		container_of(work, struct gelic_card, tx_timeout_task);
 	struct net_device *netdev = card->netdev[GELIC_PORT_ETHERNET_0];
 
-	dev_info(ctodev(card), "%s:Timed out. Restarting... \n", __func__);
+	dev_info(ctodev(card), "%s:Timed out. Restarting...\n", __func__);
 
 	if (!(netdev->flags & IFF_UP))
 		goto out;
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index f0be507e5324..d4ff627c6f7a 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -528,7 +528,7 @@ static void gelic_wl_parse_ie(u8 *data, size_t len,
 	u8 item_len;
 	u8 item_id;
 
-	pr_debug("%s: data=%p len=%ld \n", __func__,
+	pr_debug("%s: data=%p len=%ld\n", __func__,
 		 data, len);
 	memset(ie_info, 0, sizeof(struct ie_info));
 
@@ -979,7 +979,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
 		pr_debug("%s: essid = '%s'\n", __func__, extra);
 		set_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
 	} else {
-		pr_debug("%s: ESSID any \n", __func__);
+		pr_debug("%s: ESSID any\n", __func__);
 		clear_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat);
 	}
 	set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
@@ -987,7 +987,7 @@ static int gelic_wl_set_essid(struct net_device *netdev,
 
 
 	gelic_wl_try_associate(netdev); /* FIXME */
-	pr_debug("%s: -> \n", __func__);
+	pr_debug("%s: ->\n", __func__);
 	return 0;
 }
 
@@ -998,7 +998,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
 	struct gelic_wl_info *wl = port_wl(netdev_priv(netdev));
 	unsigned long irqflag;
 
-	pr_debug("%s: <- \n", __func__);
+	pr_debug("%s: <-\n", __func__);
 	mutex_lock(&wl->assoc_stat_lock);
 	spin_lock_irqsave(&wl->lock, irqflag);
 	if (test_bit(GELIC_WL_STAT_ESSID_SET, &wl->stat) ||
@@ -1011,7 +1011,7 @@ static int gelic_wl_get_essid(struct net_device *netdev,
 
 	mutex_unlock(&wl->assoc_stat_lock);
 	spin_unlock_irqrestore(&wl->lock, irqflag);
-	pr_debug("%s: -> len=%d \n", __func__, data->essid.length);
+	pr_debug("%s: -> len=%d\n", __func__, data->essid.length);
 
 	return 0;
 }
@@ -1028,7 +1028,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
 	int key_index, index_specified;
 	int ret = 0;
 
-	pr_debug("%s: <- \n", __func__);
+	pr_debug("%s: <-\n", __func__);
 	flags = enc->flags & IW_ENCODE_FLAGS;
 	key_index = enc->flags & IW_ENCODE_INDEX;
 
@@ -1087,7 +1087,7 @@ static int gelic_wl_set_encode(struct net_device *netdev,
 	set_bit(GELIC_WL_STAT_CONFIGURED, &wl->stat);
 done:
 	spin_unlock_irqrestore(&wl->lock, irqflag);
-	pr_debug("%s: -> \n", __func__);
+	pr_debug("%s: ->\n", __func__);
 	return ret;
 }
 
@@ -1101,7 +1101,7 @@ static int gelic_wl_get_encode(struct net_device *netdev,
 	unsigned int key_index, index_specified;
 	int ret = 0;
 
-	pr_debug("%s: <- \n", __func__);
+	pr_debug("%s: <-\n", __func__);
 	key_index = enc->flags & IW_ENCODE_INDEX;
 	pr_debug("%s: flag=%#x point=%p len=%d extra=%p\n", __func__,
 		 enc->flags, enc->pointer, enc->length, extra);
@@ -1215,7 +1215,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
 	int key_index;
 	int ret = 0;
 
-	pr_debug("%s: <- \n", __func__);
+	pr_debug("%s: <-\n", __func__);
 	flags = enc->flags & IW_ENCODE_FLAGS;
 	alg = ext->alg;
 	key_index = enc->flags & IW_ENCODE_INDEX;
@@ -1288,7 +1288,7 @@ static int gelic_wl_set_encodeext(struct net_device *netdev,
 	}
 done:
 	spin_unlock_irqrestore(&wl->lock, irqflag);
-	pr_debug("%s: -> \n", __func__);
+	pr_debug("%s: ->\n", __func__);
 	return ret;
 }
 
@@ -1304,7 +1304,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
 	int ret = 0;
 	int max_key_len;
 
-	pr_debug("%s: <- \n", __func__);
+	pr_debug("%s: <-\n", __func__);
 
 	max_key_len = enc->length - sizeof(struct iw_encode_ext);
 	if (max_key_len < 0)
@@ -1359,7 +1359,7 @@ static int gelic_wl_get_encodeext(struct net_device *netdev,
 	}
 out:
 	spin_unlock_irqrestore(&wl->lock, irqflag);
-	pr_debug("%s: -> \n", __func__);
+	pr_debug("%s: ->\n", __func__);
 	return ret;
 }
 /* SIOC{S,G}IWMODE */
@@ -1370,7 +1370,7 @@ static int gelic_wl_set_mode(struct net_device *netdev,
 	__u32 mode = data->mode;
 	int ret;
 
-	pr_debug("%s: <- \n", __func__);
+	pr_debug("%s: <-\n", __func__);
 	if (mode == IW_MODE_INFRA)
 		ret = 0;
 	else
@@ -1384,7 +1384,7 @@ static int gelic_wl_get_mode(struct net_device *netdev,
 			     union iwreq_data *data, char *extra)
 {
 	__u32 *mode = &data->mode;
-	pr_debug("%s: <- \n", __func__);
+	pr_debug("%s: <-\n", __func__);
 	*mode = IW_MODE_INFRA;
 	pr_debug("%s: ->\n", __func__);
 	return 0;
@@ -2022,7 +2022,7 @@ static int gelic_wl_associate_bss(struct gelic_wl_info *wl,
 
 	if (!rc) {
 		/* timeouted. Maybe key or cyrpt mode is wrong */
-		pr_info("%s: connect timeout \n", __func__);
+		pr_info("%s: connect timeout\n", __func__);
 		cmd = gelic_eurus_sync_cmd(wl, GELIC_EURUS_CMD_DISASSOC,
 					   NULL, 0);
 		kfree(cmd);
@@ -2063,7 +2063,7 @@ static void gelic_wl_connected_event(struct gelic_wl_info *wl,
 	}
 
 	if (desired_event == event) {
-		pr_debug("%s: completed \n", __func__);
+		pr_debug("%s: completed\n", __func__);
 		complete(&wl->assoc_done);
 		netif_carrier_on(port_to_netdev(wl_port(wl)));
 	} else
@@ -2280,26 +2280,25 @@ void gelic_wl_interrupt(struct net_device *netdev, u64 status)
 /*
  * driver helpers
  */
-#define IW_IOCTL(n)	[(n) - SIOCSIWCOMMIT]
 static const iw_handler gelic_wl_wext_handler[] =
 {
-	IW_IOCTL(SIOCGIWNAME) = gelic_wl_get_name,
-	IW_IOCTL(SIOCGIWRANGE) = gelic_wl_get_range,
-	IW_IOCTL(SIOCSIWSCAN) = gelic_wl_set_scan,
-	IW_IOCTL(SIOCGIWSCAN) = gelic_wl_get_scan,
-	IW_IOCTL(SIOCSIWAUTH) = gelic_wl_set_auth,
-	IW_IOCTL(SIOCGIWAUTH) = gelic_wl_get_auth,
-	IW_IOCTL(SIOCSIWESSID) = gelic_wl_set_essid,
-	IW_IOCTL(SIOCGIWESSID) = gelic_wl_get_essid,
-	IW_IOCTL(SIOCSIWENCODE) = gelic_wl_set_encode,
-	IW_IOCTL(SIOCGIWENCODE) = gelic_wl_get_encode,
-	IW_IOCTL(SIOCSIWAP) = gelic_wl_set_ap,
-	IW_IOCTL(SIOCGIWAP) = gelic_wl_get_ap,
-	IW_IOCTL(SIOCSIWENCODEEXT) = gelic_wl_set_encodeext,
-	IW_IOCTL(SIOCGIWENCODEEXT) = gelic_wl_get_encodeext,
-	IW_IOCTL(SIOCSIWMODE) = gelic_wl_set_mode,
-	IW_IOCTL(SIOCGIWMODE) = gelic_wl_get_mode,
-	IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
+	IW_HANDLER(SIOCGIWNAME, gelic_wl_get_name),
+	IW_HANDLER(SIOCGIWRANGE, gelic_wl_get_range),
+	IW_HANDLER(SIOCSIWSCAN, gelic_wl_set_scan),
+	IW_HANDLER(SIOCGIWSCAN, gelic_wl_get_scan),
+	IW_HANDLER(SIOCSIWAUTH, gelic_wl_set_auth),
+	IW_HANDLER(SIOCGIWAUTH, gelic_wl_get_auth),
+	IW_HANDLER(SIOCSIWESSID, gelic_wl_set_essid),
+	IW_HANDLER(SIOCGIWESSID, gelic_wl_get_essid),
+	IW_HANDLER(SIOCSIWENCODE, gelic_wl_set_encode),
+	IW_HANDLER(SIOCGIWENCODE, gelic_wl_get_encode),
+	IW_HANDLER(SIOCSIWAP, gelic_wl_set_ap),
+	IW_HANDLER(SIOCGIWAP, gelic_wl_get_ap),
+	IW_HANDLER(SIOCSIWENCODEEXT, gelic_wl_set_encodeext),
+	IW_HANDLER(SIOCGIWENCODEEXT, gelic_wl_get_encodeext),
+	IW_HANDLER(SIOCSIWMODE, gelic_wl_set_mode),
+	IW_HANDLER(SIOCGIWMODE, gelic_wl_get_mode),
+	IW_HANDLER(SIOCGIWNICKN, gelic_wl_get_nick),
 };
 
 static const struct iw_handler_def gelic_wl_wext_handler_def = {
@@ -2318,7 +2317,7 @@ static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
 	pr_debug("%s:start\n", __func__);
 	netdev = alloc_etherdev(sizeof(struct gelic_port) +
 				sizeof(struct gelic_wl_info));
-	pr_debug("%s: netdev =%p card=%p \np", __func__, netdev, card);
+	pr_debug("%s: netdev =%p card=%p\n", __func__, netdev, card);
 	if (!netdev)
 		return NULL;
 
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 4ef0afbcbe1b..01a6ca303a17 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -343,8 +343,8 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
 			cpu_to_le32(LS_64BITS(map));
 		lrg_buf_cb->buf_phy_addr_high =
 			cpu_to_le32(MS_64BITS(map));
-		pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
-		pci_unmap_len_set(lrg_buf_cb, maplen,
+		dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+		dma_unmap_len_set(lrg_buf_cb, maplen,
 				  qdev->lrg_buffer_len -
 				  QL_HEADER_SPACE);
 	}
@@ -1924,8 +1924,8 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
 					cpu_to_le32(LS_64BITS(map));
 				lrg_buf_cb->buf_phy_addr_high =
 					cpu_to_le32(MS_64BITS(map));
-				pci_unmap_addr_set(lrg_buf_cb, mapaddr, map);
-				pci_unmap_len_set(lrg_buf_cb, maplen,
+				dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
+				dma_unmap_len_set(lrg_buf_cb, maplen,
 						  qdev->lrg_buffer_len -
 						  QL_HEADER_SPACE);
 				--qdev->lrg_buf_skb_check;
@@ -2041,16 +2041,16 @@ static void ql_process_mac_tx_intr(struct ql3_adapter *qdev,
 	}
 
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(&tx_cb->map[0], mapaddr),
-			 pci_unmap_len(&tx_cb->map[0], maplen),
+			 dma_unmap_addr(&tx_cb->map[0], mapaddr),
+			 dma_unmap_len(&tx_cb->map[0], maplen),
 			 PCI_DMA_TODEVICE);
 	tx_cb->seg_count--;
 	if (tx_cb->seg_count) {
 		for (i = 1; i < tx_cb->seg_count; i++) {
 			pci_unmap_page(qdev->pdev,
-				       pci_unmap_addr(&tx_cb->map[i],
+				       dma_unmap_addr(&tx_cb->map[i],
 						      mapaddr),
-				       pci_unmap_len(&tx_cb->map[i], maplen),
+				       dma_unmap_len(&tx_cb->map[i], maplen),
 				       PCI_DMA_TODEVICE);
 		}
 	}
@@ -2119,8 +2119,8 @@ static void ql_process_mac_rx_intr(struct ql3_adapter *qdev,
 
 	skb_put(skb, length);
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
-			 pci_unmap_len(lrg_buf_cb2, maplen),
+			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
+			 dma_unmap_len(lrg_buf_cb2, maplen),
 			 PCI_DMA_FROMDEVICE);
 	prefetch(skb->data);
 	skb->ip_summed = CHECKSUM_NONE;
@@ -2165,8 +2165,8 @@ static void ql_process_macip_rx_intr(struct ql3_adapter *qdev,
 
 	skb_put(skb2, length);	/* Just the second buffer length here. */
 	pci_unmap_single(qdev->pdev,
-			 pci_unmap_addr(lrg_buf_cb2, mapaddr),
-			 pci_unmap_len(lrg_buf_cb2, maplen),
+			 dma_unmap_addr(lrg_buf_cb2, mapaddr),
+			 dma_unmap_len(lrg_buf_cb2, maplen),
 			 PCI_DMA_FROMDEVICE);
 	prefetch(skb2->data);
 
@@ -2258,7 +2258,7 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
 			       "%x.\n",
 			       ndev->name, net_rsp->opcode);
 			printk(KERN_ERR PFX
-			       "0x%08lx 0x%08lx 0x%08lx 0x%08lx \n",
+			       "0x%08lx 0x%08lx 0x%08lx 0x%08lx\n",
 			       (unsigned long int)tmp[0],
 			       (unsigned long int)tmp[1],
 			       (unsigned long int)tmp[2],
@@ -2454,8 +2454,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
 	oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
 	oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
 	oal_entry->len = cpu_to_le32(len);
-	pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
-	pci_unmap_len_set(&tx_cb->map[seg], maplen, len);
+	dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
+	dma_unmap_len_set(&tx_cb->map[seg], maplen, len);
 	seg++;
 
 	if (seg_cnt == 1) {
@@ -2488,9 +2488,9 @@ static int ql_send_map(struct ql3_adapter *qdev,
 			oal_entry->len =
 				cpu_to_le32(sizeof(struct oal) |
 					    OAL_CONT_ENTRY);
-			pci_unmap_addr_set(&tx_cb->map[seg], mapaddr,
+			dma_unmap_addr_set(&tx_cb->map[seg], mapaddr,
2492 map); 2492 map);
2493 pci_unmap_len_set(&tx_cb->map[seg], maplen, 2493 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2494 sizeof(struct oal)); 2494 sizeof(struct oal));
2495 oal_entry = (struct oal_entry *)oal; 2495 oal_entry = (struct oal_entry *)oal;
2496 oal++; 2496 oal++;
@@ -2512,8 +2512,8 @@ static int ql_send_map(struct ql3_adapter *qdev,
2512 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map)); 2512 oal_entry->dma_lo = cpu_to_le32(LS_64BITS(map));
2513 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map)); 2513 oal_entry->dma_hi = cpu_to_le32(MS_64BITS(map));
2514 oal_entry->len = cpu_to_le32(frag->size); 2514 oal_entry->len = cpu_to_le32(frag->size);
2515 pci_unmap_addr_set(&tx_cb->map[seg], mapaddr, map); 2515 dma_unmap_addr_set(&tx_cb->map[seg], mapaddr, map);
2516 pci_unmap_len_set(&tx_cb->map[seg], maplen, 2516 dma_unmap_len_set(&tx_cb->map[seg], maplen,
2517 frag->size); 2517 frag->size);
2518 } 2518 }
2519 /* Terminate the last segment. */ 2519 /* Terminate the last segment. */
@@ -2539,22 +2539,22 @@ map_error:
2539 (seg == 12 && seg_cnt > 13) || /* but necessary. */ 2539 (seg == 12 && seg_cnt > 13) || /* but necessary. */
2540 (seg == 17 && seg_cnt > 18)) { 2540 (seg == 17 && seg_cnt > 18)) {
2541 pci_unmap_single(qdev->pdev, 2541 pci_unmap_single(qdev->pdev,
2542 pci_unmap_addr(&tx_cb->map[seg], mapaddr), 2542 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2543 pci_unmap_len(&tx_cb->map[seg], maplen), 2543 dma_unmap_len(&tx_cb->map[seg], maplen),
2544 PCI_DMA_TODEVICE); 2544 PCI_DMA_TODEVICE);
2545 oal++; 2545 oal++;
2546 seg++; 2546 seg++;
2547 } 2547 }
2548 2548
2549 pci_unmap_page(qdev->pdev, 2549 pci_unmap_page(qdev->pdev,
2550 pci_unmap_addr(&tx_cb->map[seg], mapaddr), 2550 dma_unmap_addr(&tx_cb->map[seg], mapaddr),
2551 pci_unmap_len(&tx_cb->map[seg], maplen), 2551 dma_unmap_len(&tx_cb->map[seg], maplen),
2552 PCI_DMA_TODEVICE); 2552 PCI_DMA_TODEVICE);
2553 } 2553 }
2554 2554
2555 pci_unmap_single(qdev->pdev, 2555 pci_unmap_single(qdev->pdev,
2556 pci_unmap_addr(&tx_cb->map[0], mapaddr), 2556 dma_unmap_addr(&tx_cb->map[0], mapaddr),
2557 pci_unmap_addr(&tx_cb->map[0], maplen), 2557 dma_unmap_addr(&tx_cb->map[0], maplen),
2558 PCI_DMA_TODEVICE); 2558 PCI_DMA_TODEVICE);
2559 2559
2560 return NETDEV_TX_BUSY; 2560 return NETDEV_TX_BUSY;
@@ -2841,8 +2841,8 @@ static void ql_free_large_buffers(struct ql3_adapter *qdev)
2841 if (lrg_buf_cb->skb) { 2841 if (lrg_buf_cb->skb) {
2842 dev_kfree_skb(lrg_buf_cb->skb); 2842 dev_kfree_skb(lrg_buf_cb->skb);
2843 pci_unmap_single(qdev->pdev, 2843 pci_unmap_single(qdev->pdev,
2844 pci_unmap_addr(lrg_buf_cb, mapaddr), 2844 dma_unmap_addr(lrg_buf_cb, mapaddr),
2845 pci_unmap_len(lrg_buf_cb, maplen), 2845 dma_unmap_len(lrg_buf_cb, maplen),
2846 PCI_DMA_FROMDEVICE); 2846 PCI_DMA_FROMDEVICE);
2847 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb)); 2847 memset(lrg_buf_cb, 0, sizeof(struct ql_rcv_buf_cb));
2848 } else { 2848 } else {
@@ -2912,8 +2912,8 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
2912 return -ENOMEM; 2912 return -ENOMEM;
2913 } 2913 }
2914 2914
2915 pci_unmap_addr_set(lrg_buf_cb, mapaddr, map); 2915 dma_unmap_addr_set(lrg_buf_cb, mapaddr, map);
2916 pci_unmap_len_set(lrg_buf_cb, maplen, 2916 dma_unmap_len_set(lrg_buf_cb, maplen,
2917 qdev->lrg_buffer_len - 2917 qdev->lrg_buffer_len -
2918 QL_HEADER_SPACE); 2918 QL_HEADER_SPACE);
2919 lrg_buf_cb->buf_phy_addr_low = 2919 lrg_buf_cb->buf_phy_addr_low =
@@ -3793,13 +3793,13 @@ static void ql_reset_work(struct work_struct *work)
3793 "%s: Freeing lost SKB.\n", 3793 "%s: Freeing lost SKB.\n",
3794 qdev->ndev->name); 3794 qdev->ndev->name);
3795 pci_unmap_single(qdev->pdev, 3795 pci_unmap_single(qdev->pdev,
3796 pci_unmap_addr(&tx_cb->map[0], mapaddr), 3796 dma_unmap_addr(&tx_cb->map[0], mapaddr),
3797 pci_unmap_len(&tx_cb->map[0], maplen), 3797 dma_unmap_len(&tx_cb->map[0], maplen),
3798 PCI_DMA_TODEVICE); 3798 PCI_DMA_TODEVICE);
3799 for(j=1;j<tx_cb->seg_count;j++) { 3799 for(j=1;j<tx_cb->seg_count;j++) {
3800 pci_unmap_page(qdev->pdev, 3800 pci_unmap_page(qdev->pdev,
3801 pci_unmap_addr(&tx_cb->map[j],mapaddr), 3801 dma_unmap_addr(&tx_cb->map[j],mapaddr),
3802 pci_unmap_len(&tx_cb->map[j],maplen), 3802 dma_unmap_len(&tx_cb->map[j],maplen),
3803 PCI_DMA_TODEVICE); 3803 PCI_DMA_TODEVICE);
3804 } 3804 }
3805 dev_kfree_skb(tx_cb->skb); 3805 dev_kfree_skb(tx_cb->skb);
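
The qla3xxx.c hunks are a mechanical rename from the PCI-specific unmap-state accessors to their bus-agnostic DMA equivalents; the mapping calls themselves (pci_unmap_single(), PCI_DMA_TODEVICE, and so on) are left alone. The rename is behavior-preserving because, with CONFIG_NEED_DMA_MAP_STATE enabled, the accessors in <linux/dma-mapping.h> are plain member reads and writes (and compile away entirely without it):

    #define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
    #define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
    #define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
    #define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))

That expansion also explains line 2557 above, where maplen is fetched through dma_unmap_addr() rather than dma_unmap_len(): both macros reduce to the same member access, so the conversion faithfully preserves a pre-existing stylistic oddity without changing behavior.
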
diff --git a/drivers/net/qla3xxx.h b/drivers/net/qla3xxx.h
index 7113e71b15a1..3362a661248c 100644
--- a/drivers/net/qla3xxx.h
+++ b/drivers/net/qla3xxx.h
@@ -998,8 +998,8 @@ enum link_state_t {
998struct ql_rcv_buf_cb { 998struct ql_rcv_buf_cb {
999 struct ql_rcv_buf_cb *next; 999 struct ql_rcv_buf_cb *next;
1000 struct sk_buff *skb; 1000 struct sk_buff *skb;
1001 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1001 DEFINE_DMA_UNMAP_ADDR(mapaddr);
1002 DECLARE_PCI_UNMAP_LEN(maplen); 1002 DEFINE_DMA_UNMAP_LEN(maplen);
1003 __le32 buf_phy_addr_low; 1003 __le32 buf_phy_addr_low;
1004 __le32 buf_phy_addr_high; 1004 __le32 buf_phy_addr_high;
1005 int index; 1005 int index;
@@ -1029,8 +1029,8 @@ struct oal {
1029}; 1029};
1030 1030
1031struct map_list { 1031struct map_list {
1032 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1032 DEFINE_DMA_UNMAP_ADDR(mapaddr);
1033 DECLARE_PCI_UNMAP_LEN(maplen); 1033 DEFINE_DMA_UNMAP_LEN(maplen);
1034}; 1034};
1035 1035
1036struct ql_tx_buf_cb { 1036struct ql_tx_buf_cb {
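
The header side of the same conversion: the unmap-tracking fields in ql_rcv_buf_cb and map_list switch from DECLARE_PCI_UNMAP_* to DEFINE_DMA_UNMAP_*. The point of these macros is that the fields only exist when the architecture actually needs unmap state, per <linux/dma-mapping.h>:

    #ifdef CONFIG_NEED_DMA_MAP_STATE
    #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)  dma_addr_t ADDR_NAME
    #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)    __u32 LEN_NAME
    #else   /* no unmap state needed: the fields cost zero bytes */
    #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
    #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
    #endif
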
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 0da94b208db1..28c148cbe37b 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -51,8 +51,8 @@
51 51
52#define _QLCNIC_LINUX_MAJOR 5 52#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 53#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 0 54#define _QLCNIC_LINUX_SUBVERSION 1
55#define QLCNIC_LINUX_VERSIONID "5.0.0" 55#define QLCNIC_LINUX_VERSIONID "5.0.1"
56 56
57#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 57#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
58#define _major(v) (((v) >> 24) & 0xff) 58#define _major(v) (((v) >> 24) & 0xff)
@@ -958,8 +958,10 @@ struct qlcnic_adapter {
958 u8 dev_state; 958 u8 dev_state;
959 u8 diag_test; 959 u8 diag_test;
960 u8 diag_cnt; 960 u8 diag_cnt;
961 u8 reset_ack_timeo;
962 u8 dev_init_timeo;
961 u8 rsrd1; 963 u8 rsrd1;
962 u16 rsrd2; 964 u16 msg_enable;
963 965
964 u8 mac_addr[ETH_ALEN]; 966 u8 mac_addr[ETH_ALEN];
965 967
@@ -994,6 +996,11 @@ u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
994int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data); 996int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
995int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data); 997int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
996int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data); 998int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
999void qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *, u64, u64 *);
1000void qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *, u64, u64);
1001
1002#define ADDR_IN_RANGE(addr, low, high) \
1003 (((addr) < (high)) && ((addr) >= (low)))
997 1004
998#define QLCRD32(adapter, off) \ 1005#define QLCRD32(adapter, off) \
999 (qlcnic_hw_read_wx_2M(adapter, off)) 1006 (qlcnic_hw_read_wx_2M(adapter, off))
@@ -1035,6 +1042,7 @@ int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
1035void qlcnic_request_firmware(struct qlcnic_adapter *adapter); 1042void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
1036void qlcnic_release_firmware(struct qlcnic_adapter *adapter); 1043void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
1037int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter); 1044int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
1045void qlcnic_setup_idc_param(struct qlcnic_adapter *adapter);
1038 1046
1039int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp); 1047int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
1040int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr, 1048int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
@@ -1128,4 +1136,11 @@ static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
1128 1136
1129extern const struct ethtool_ops qlcnic_ethtool_ops; 1137extern const struct ethtool_ops qlcnic_ethtool_ops;
1130 1138
1139#define QLCDB(adapter, lvl, _fmt, _args...) do { \
1140 if (NETIF_MSG_##lvl & adapter->msg_enable) \
1141 printk(KERN_INFO "%s: %s: " _fmt, \
1142 dev_name(&adapter->pdev->dev), \
1143 __func__, ##_args); \
1144 } while (0)
1145
1131#endif /* __QLCNIC_H_ */ 1146#endif /* __QLCNIC_H_ */
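
qlcnic.h gains message-level infrastructure alongside the new IDC timeouts: the reserved rsrd2 field is repurposed as msg_enable, and QLCDB() gates debug printks on the standard NETIF_MSG_* classes by token-pasting its lvl argument onto NETIF_MSG_. A hypothetical call site, to show the intended use (the format string here is illustrative, not from the patch):

    /* Prints only when NETIF_MSG_DRV is set in adapter->msg_enable. */
    QLCDB(adapter, DRV, "fw recovery scheduled, state=%lx\n",
          adapter->state);
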
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index f83e15fe3e1b..08d6f105371f 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -998,6 +998,20 @@ static int qlcnic_set_flags(struct net_device *netdev, u32 data)
998 return 0; 998 return 0;
999} 999}
1000 1000
1001static u32 qlcnic_get_msglevel(struct net_device *netdev)
1002{
1003 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1004
1005 return adapter->msg_enable;
1006}
1007
1008static void qlcnic_set_msglevel(struct net_device *netdev, u32 msglvl)
1009{
1010 struct qlcnic_adapter *adapter = netdev_priv(netdev);
1011
1012 adapter->msg_enable = msglvl;
1013}
1014
1001const struct ethtool_ops qlcnic_ethtool_ops = { 1015const struct ethtool_ops qlcnic_ethtool_ops = {
1002 .get_settings = qlcnic_get_settings, 1016 .get_settings = qlcnic_get_settings,
1003 .set_settings = qlcnic_set_settings, 1017 .set_settings = qlcnic_set_settings,
@@ -1029,4 +1043,6 @@ const struct ethtool_ops qlcnic_ethtool_ops = {
1029 .get_flags = ethtool_op_get_flags, 1043 .get_flags = ethtool_op_get_flags,
1030 .set_flags = qlcnic_set_flags, 1044 .set_flags = qlcnic_set_flags,
1031 .phys_id = qlcnic_blink_led, 1045 .phys_id = qlcnic_blink_led,
1046 .set_msglevel = qlcnic_set_msglevel,
1047 .get_msglevel = qlcnic_get_msglevel,
1032}; 1048};
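
These ethtool hooks are what let userspace flip the NETIF_MSG_* bits that QLCDB() tests, e.g. ethtool -s ethX msglvl <mask>. For reference, the two classes the QLCDB() call sites in this series use carry these values in <linux/netdevice.h>:

    enum {
            NETIF_MSG_DRV = 0x0001,   /* QLCDB(adapter, DRV, ...) */
            NETIF_MSG_HW  = 0x2000,   /* QLCDB(adapter, HW,  ...) */
    };
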
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 0469f84360a4..51fa3fbcf58a 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -435,9 +435,10 @@ enum {
435#define QLCNIC_PCI_MS_2M (0x80000) 435#define QLCNIC_PCI_MS_2M (0x80000)
436#define QLCNIC_PCI_OCM0_2M (0x000c0000UL) 436#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
437#define QLCNIC_PCI_CRBSPACE (0x06000000UL) 437#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
438#define QLCNIC_PCI_CAMQM (0x04800000UL)
439#define QLCNIC_PCI_CAMQM_END (0x04800800UL)
438#define QLCNIC_PCI_2MB_SIZE (0x00200000UL) 440#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
439#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL) 441#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
440#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
441 442
442#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM) 443#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
443 444
@@ -448,7 +449,7 @@ enum {
448#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL) 449#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
449#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL) 450#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
450#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL) 451#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
451#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL) 452#define QLCNIC_ADDR_QDR_NET_MAX (0x0000000307ffffffULL)
452 453
453/* 454/*
454 * Register offsets for MN 455 * Register offsets for MN
@@ -694,6 +695,8 @@ enum {
694#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148)) 695#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
695#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c)) 696#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
696#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c)) 697#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
698#define QLCNIC_ROM_DEV_INIT_TIMEOUT (0x3e885c)
699#define QLCNIC_ROM_DRV_RESET_TIMEOUT (0x3e8860)
697 700
698 /* Device State */ 701 /* Device State */
699#define QLCNIC_DEV_COLD 1 702#define QLCNIC_DEV_COLD 1
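
Register-map notes on the hunks above: QLCNIC_PCI_CAMQM and QLCNIC_PCI_CAMQM_END bound the CAM/QM window that the new helpers and the sysfs CRB path validate against (replacing the old QLCNIC_PCI_CAMQM_2M_END spelling), the QDR test-agent limit is renamed and widened to QLCNIC_ADDR_QDR_NET_MAX, and the two QLCNIC_ROM_* offsets are where qlcnic_setup_idc_param() reads its timeouts from flash. The range test these constants feed (moved into qlcnic.h earlier in the patch) is half-open:

    /* Low bound inclusive, high bound exclusive, so a CAMQM offset is
     * valid iff QLCNIC_PCI_CAMQM <= off < QLCNIC_PCI_CAMQM_END. */
    #define ADDR_IN_RANGE(addr, low, high) \
            (((addr) < (high)) && ((addr) >= (low)))
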
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index e73ba455aa20..7a72b8d06bcb 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -54,21 +54,6 @@ static inline void writeq(u64 val, void __iomem *addr)
54} 54}
55#endif 55#endif
56 56
57#define ADDR_IN_RANGE(addr, low, high) \
58 (((addr) < (high)) && ((addr) >= (low)))
59
60#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
61 ((adapter)->ahw.pci_base0 + (off))
62
63static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
64 unsigned long off)
65{
66 if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
67 return PCI_OFFSET_FIRST_RANGE(adapter, off);
68
69 return NULL;
70}
71
72static const struct crb_128M_2M_block_map 57static const struct crb_128M_2M_block_map
73crb_128M_2M_map[64] __cacheline_aligned_in_smp = { 58crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
74 {{{0, 0, 0, 0} } }, /* 0: PCI */ 59 {{{0, 0, 0, 0} } }, /* 0: PCI */
@@ -310,8 +295,12 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
310 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem))); 295 done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
311 if (done == 1) 296 if (done == 1)
312 break; 297 break;
313 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) 298 if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
299 dev_err(&adapter->pdev->dev,
300 "Failed to acquire sem=%d lock;reg_id=%d\n",
301 sem, id_reg);
314 return -EIO; 302 return -EIO;
303 }
315 msleep(1); 304 msleep(1);
316 } 305 }
317 306
@@ -427,7 +416,7 @@ static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr)
427void qlcnic_set_multi(struct net_device *netdev) 416void qlcnic_set_multi(struct net_device *netdev)
428{ 417{
429 struct qlcnic_adapter *adapter = netdev_priv(netdev); 418 struct qlcnic_adapter *adapter = netdev_priv(netdev);
430 struct dev_mc_list *mc_ptr; 419 struct netdev_hw_addr *ha;
431 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 420 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
432 u32 mode = VPORT_MISS_MODE_DROP; 421 u32 mode = VPORT_MISS_MODE_DROP;
433 422
@@ -449,8 +438,8 @@ void qlcnic_set_multi(struct net_device *netdev)
449 } 438 }
450 439
451 if (!netdev_mc_empty(netdev)) { 440 if (!netdev_mc_empty(netdev)) {
452 netdev_for_each_mc_addr(mc_ptr, netdev) { 441 netdev_for_each_mc_addr(ha, netdev) {
453 qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr); 442 qlcnic_nic_add_mac(adapter, ha->addr);
454 } 443 }
455 } 444 }
456 445
@@ -878,13 +867,6 @@ qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
878 u64 addr, u32 *start) 867 u64 addr, u32 *start)
879{ 868{
880 u32 window; 869 u32 window;
881 struct pci_dev *pdev = adapter->pdev;
882
883 if ((addr & 0x00ff800) == 0xff800) {
884 if (printk_ratelimit())
885 dev_warn(&pdev->dev, "QM access not handled\n");
886 return -EIO;
887 }
888 870
889 window = OCM_WIN_P3P(addr); 871 window = OCM_WIN_P3P(addr);
890 872
@@ -901,8 +883,7 @@ static int
901qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off, 883qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
902 u64 *data, int op) 884 u64 *data, int op)
903{ 885{
904 void __iomem *addr, *mem_ptr = NULL; 886 void __iomem *addr;
905 resource_size_t mem_base;
906 int ret; 887 int ret;
907 u32 start; 888 u32 start;
908 889
@@ -912,21 +893,8 @@ qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
912 if (ret != 0) 893 if (ret != 0)
913 goto unlock; 894 goto unlock;
914 895
915 addr = pci_base_offset(adapter, start); 896 addr = adapter->ahw.pci_base0 + start;
916 if (addr)
917 goto noremap;
918 897
919 mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
920
921 mem_ptr = ioremap(mem_base, PAGE_SIZE);
922 if (mem_ptr == NULL) {
923 ret = -EIO;
924 goto unlock;
925 }
926
927 addr = mem_ptr + (start & (PAGE_SIZE - 1));
928
929noremap:
930 if (op == 0) /* read */ 898 if (op == 0) /* read */
931 *data = readq(addr); 899 *data = readq(addr);
932 else /* write */ 900 else /* write */
@@ -935,11 +903,31 @@ noremap:
935unlock: 903unlock:
936 mutex_unlock(&adapter->ahw.mem_lock); 904 mutex_unlock(&adapter->ahw.mem_lock);
937 905
938 if (mem_ptr)
939 iounmap(mem_ptr);
940 return ret; 906 return ret;
941} 907}
942 908
909void
910qlcnic_pci_camqm_read_2M(struct qlcnic_adapter *adapter, u64 off, u64 *data)
911{
912 void __iomem *addr = adapter->ahw.pci_base0 +
913 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
914
915 mutex_lock(&adapter->ahw.mem_lock);
916 *data = readq(addr);
917 mutex_unlock(&adapter->ahw.mem_lock);
918}
919
920void
921qlcnic_pci_camqm_write_2M(struct qlcnic_adapter *adapter, u64 off, u64 data)
922{
923 void __iomem *addr = adapter->ahw.pci_base0 +
924 QLCNIC_PCI_CAMQM_2M_BASE + (off - QLCNIC_PCI_CAMQM);
925
926 mutex_lock(&adapter->ahw.mem_lock);
927 writeq(data, addr);
928 mutex_unlock(&adapter->ahw.mem_lock);
929}
930
943#define MAX_CTL_CHECK 1000 931#define MAX_CTL_CHECK 1000
944 932
945int 933int
@@ -948,7 +936,6 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
948{ 936{
949 int i, j, ret; 937 int i, j, ret;
950 u32 temp, off8; 938 u32 temp, off8;
951 u64 stride;
952 void __iomem *mem_crb; 939 void __iomem *mem_crb;
953 940
954 /* Only 64-bit aligned access */ 941 /* Only 64-bit aligned access */
@@ -957,7 +944,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
957 944
958 /* P3 onward, test agent base for MIU and SIU is same */ 945 /* P3 onward, test agent base for MIU and SIU is same */
959 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, 946 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
960 QLCNIC_ADDR_QDR_NET_MAX_P3)) { 947 QLCNIC_ADDR_QDR_NET_MAX)) {
961 mem_crb = qlcnic_get_ioaddr(adapter, 948 mem_crb = qlcnic_get_ioaddr(adapter,
962 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); 949 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
963 goto correct; 950 goto correct;
@@ -975,9 +962,7 @@ qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
975 return -EIO; 962 return -EIO;
976 963
977correct: 964correct:
978 stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8; 965 off8 = off & ~0xf;
979
980 off8 = off & ~(stride-1);
981 966
982 mutex_lock(&adapter->ahw.mem_lock); 967 mutex_lock(&adapter->ahw.mem_lock);
983 968
@@ -985,30 +970,28 @@ correct:
985 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI)); 970 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
986 971
987 i = 0; 972 i = 0;
988 if (stride == 16) { 973 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
989 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL)); 974 writel((TA_CTL_START | TA_CTL_ENABLE),
990 writel((TA_CTL_START | TA_CTL_ENABLE), 975 (mem_crb + TEST_AGT_CTRL));
991 (mem_crb + TEST_AGT_CTRL));
992
993 for (j = 0; j < MAX_CTL_CHECK; j++) {
994 temp = readl(mem_crb + TEST_AGT_CTRL);
995 if ((temp & TA_CTL_BUSY) == 0)
996 break;
997 }
998 976
999 if (j >= MAX_CTL_CHECK) { 977 for (j = 0; j < MAX_CTL_CHECK; j++) {
1000 ret = -EIO; 978 temp = readl(mem_crb + TEST_AGT_CTRL);
1001 goto done; 979 if ((temp & TA_CTL_BUSY) == 0)
1002 } 980 break;
981 }
1003 982
1004 i = (off & 0xf) ? 0 : 2; 983 if (j >= MAX_CTL_CHECK) {
1005 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)), 984 ret = -EIO;
1006 mem_crb + MIU_TEST_AGT_WRDATA(i)); 985 goto done;
1007 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1008 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1009 i = (off & 0xf) ? 2 : 0;
1010 } 986 }
1011 987
988 i = (off & 0xf) ? 0 : 2;
989 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
990 mem_crb + MIU_TEST_AGT_WRDATA(i));
991 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
992 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
993 i = (off & 0xf) ? 2 : 0;
994
1012 writel(data & 0xffffffff, 995 writel(data & 0xffffffff,
1013 mem_crb + MIU_TEST_AGT_WRDATA(i)); 996 mem_crb + MIU_TEST_AGT_WRDATA(i));
1014 writel((data >> 32) & 0xffffffff, 997 writel((data >> 32) & 0xffffffff,
@@ -1044,7 +1027,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1044{ 1027{
1045 int j, ret; 1028 int j, ret;
1046 u32 temp, off8; 1029 u32 temp, off8;
1047 u64 val, stride; 1030 u64 val;
1048 void __iomem *mem_crb; 1031 void __iomem *mem_crb;
1049 1032
1050 /* Only 64-bit aligned access */ 1033 /* Only 64-bit aligned access */
@@ -1053,7 +1036,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1053 1036
1054 /* P3 onward, test agent base for MIU and SIU is same */ 1037 /* P3 onward, test agent base for MIU and SIU is same */
1055 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET, 1038 if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
1056 QLCNIC_ADDR_QDR_NET_MAX_P3)) { 1039 QLCNIC_ADDR_QDR_NET_MAX)) {
1057 mem_crb = qlcnic_get_ioaddr(adapter, 1040 mem_crb = qlcnic_get_ioaddr(adapter,
1058 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE); 1041 QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1059 goto correct; 1042 goto correct;
@@ -1073,9 +1056,7 @@ qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
1073 return -EIO; 1056 return -EIO;
1074 1057
1075correct: 1058correct:
1076 stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8; 1059 off8 = off & ~0xf;
1077
1078 off8 = off & ~(stride-1);
1079 1060
1080 mutex_lock(&adapter->ahw.mem_lock); 1061 mutex_lock(&adapter->ahw.mem_lock);
1081 1062
@@ -1097,7 +1078,7 @@ correct:
1097 ret = -EIO; 1078 ret = -EIO;
1098 } else { 1079 } else {
1099 off8 = MIU_TEST_AGT_RDDATA_LO; 1080 off8 = MIU_TEST_AGT_RDDATA_LO;
1100 if ((stride == 16) && (off & 0xf)) 1081 if (off & 0xf)
1101 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO; 1082 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1102 1083
1103 temp = readl(mem_crb + off8 + 4); 1084 temp = readl(mem_crb + off8 + 4);
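
The qlcnic_hw.c hunks simplify direct memory access: since ahw.pci_base0 already maps the whole 2MB BAR, qlcnic_pci_mem_access_direct() can index straight into it instead of falling back to a per-access ioremap(), the P3P stride-16 path becomes the only path (off8 = off & ~0xf unconditionally), and CAMQM registers get dedicated mutex-guarded 64-bit accessors in place of the old "QM access not handled" bail-out. The address translation both new helpers share, written out as a sketch (camqm_io_addr is a hypothetical name; the helpers open-code this expression):

    /* Rebase a CAMQM bus address into the ioremapped 2M BAR: the
     * window lives at QLCNIC_PCI_CAMQM_2M_BASE within pci_base0. */
    static void __iomem *camqm_io_addr(struct qlcnic_adapter *adapter,
                                       u64 off)
    {
            return adapter->ahw.pci_base0 + QLCNIC_PCI_CAMQM_2M_BASE +
                   (off - QLCNIC_PCI_CAMQM);
    }
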
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 9d2c124048fa..01ce74ee99f9 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -530,6 +530,22 @@ int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
530 return 0; 530 return 0;
531} 531}
532 532
533void
534qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
535
536 int timeo;
537
538 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DEV_INIT_TIMEOUT, &timeo))
539 timeo = 30;
540
541 adapter->dev_init_timeo = timeo;
542
543 if (qlcnic_rom_fast_read(adapter, QLCNIC_ROM_DRV_RESET_TIMEOUT, &timeo))
544 timeo = 10;
545
546 adapter->reset_ack_timeo = timeo;
547}
548
533static int 549static int
534qlcnic_has_mn(struct qlcnic_adapter *adapter) 550qlcnic_has_mn(struct qlcnic_adapter *adapter)
535{ 551{
@@ -612,7 +628,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
612 return -EINVAL; 628 return -EINVAL;
613 629
614 tab_size = cpu_to_le32(tab_desc->findex) + 630 tab_size = cpu_to_le32(tab_desc->findex) +
615 (cpu_to_le32(tab_desc->entry_size * (idx + 1))); 631 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
616 632
617 if (adapter->fw->size < tab_size) 633 if (adapter->fw->size < tab_size)
618 return -EINVAL; 634 return -EINVAL;
@@ -621,7 +637,7 @@ qlcnic_validate_bootld(struct qlcnic_adapter *adapter)
621 (cpu_to_le32(tab_desc->entry_size) * (idx)); 637 (cpu_to_le32(tab_desc->entry_size) * (idx));
622 descr = (struct uni_data_desc *)&unirom[offs]; 638 descr = (struct uni_data_desc *)&unirom[offs];
623 639
624 data_size = descr->findex + cpu_to_le32(descr->size); 640 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
625 641
626 if (adapter->fw->size < data_size) 642 if (adapter->fw->size < data_size)
627 return -EINVAL; 643 return -EINVAL;
@@ -647,7 +663,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
647 return -EINVAL; 663 return -EINVAL;
648 664
649 tab_size = cpu_to_le32(tab_desc->findex) + 665 tab_size = cpu_to_le32(tab_desc->findex) +
650 (cpu_to_le32(tab_desc->entry_size * (idx + 1))); 666 (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
651 667
652 if (adapter->fw->size < tab_size) 668 if (adapter->fw->size < tab_size)
653 return -EINVAL; 669 return -EINVAL;
@@ -655,7 +671,7 @@ qlcnic_validate_fw(struct qlcnic_adapter *adapter)
655 offs = cpu_to_le32(tab_desc->findex) + 671 offs = cpu_to_le32(tab_desc->findex) +
656 (cpu_to_le32(tab_desc->entry_size) * (idx)); 672 (cpu_to_le32(tab_desc->entry_size) * (idx));
657 descr = (struct uni_data_desc *)&unirom[offs]; 673 descr = (struct uni_data_desc *)&unirom[offs];
658 data_size = descr->findex + cpu_to_le32(descr->size); 674 data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
659 675
660 if (adapter->fw->size < data_size) 676 if (adapter->fw->size < data_size)
661 return -EINVAL; 677 return -EINVAL;
@@ -950,6 +966,16 @@ qlcnic_load_firmware(struct qlcnic_adapter *adapter)
950 966
951 flashaddr += 8; 967 flashaddr += 8;
952 } 968 }
969
970 size = (__force u32)qlcnic_get_fw_size(adapter) % 8;
971 if (size) {
972 data = cpu_to_le64(ptr64[i]);
973
974 if (qlcnic_pci_mem_write_2M(adapter,
975 flashaddr, data))
976 return -EIO;
977 }
978
953 } else { 979 } else {
954 u64 data; 980 u64 data;
955 u32 hi, lo; 981 u32 hi, lo;
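
Besides adding qlcnic_setup_idc_param() and a tail write for firmware images whose size is not a multiple of 8, the qlcnic_init.c hunks fix a genuine endianness bug in the image validation: the byte-swap must be applied to the little-endian entry_size/findex fields before arithmetic, not to the product (the code spells the swap cpu_to_le32() even on the read side, which is byte-for-byte the same operation as le32_to_cpu()). A standalone illustration of why the order matters on a big-endian host:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's 32-bit byte swap (swab32). */
    static uint32_t swab32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0x0000ff00) |
                   ((x << 8) & 0x00ff0000) | (x << 24);
    }

    int main(void)
    {
            /* entry_size = 128 stored little-endian reads back as the
             * raw word 0x80000000 on a big-endian CPU. */
            uint32_t raw = 0x80000000u;

            printf("wrong: %u\n", swab32(raw * 2)); /* product overflows
                                                       to 0, swaps to 0 */
            printf("right: %u\n", swab32(raw) * 2); /* 128 * 2 == 256  */
            return 0;
    }
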
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 234dab1f9982..e4fd5dcdfb4c 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -650,7 +650,10 @@ qlcnic_start_firmware(struct qlcnic_adapter *adapter)
650 if (err) 650 if (err)
651 return err; 651 return err;
652 652
653 if (!qlcnic_can_start_firmware(adapter)) 653 err = qlcnic_can_start_firmware(adapter);
654 if (err < 0)
655 return err;
656 else if (!err)
654 goto wait_init; 657 goto wait_init;
655 658
656 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc)); 659 first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
@@ -950,11 +953,11 @@ void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
950 adapter->max_sds_rings = max_sds_rings; 953 adapter->max_sds_rings = max_sds_rings;
951 954
952 if (qlcnic_attach(adapter)) 955 if (qlcnic_attach(adapter))
953 return; 956 goto out;
954 957
955 if (netif_running(netdev)) 958 if (netif_running(netdev))
956 __qlcnic_up(adapter, netdev); 959 __qlcnic_up(adapter, netdev);
957 960out:
958 netif_device_attach(netdev); 961 netif_device_attach(netdev);
959} 962}
960 963
@@ -976,8 +979,10 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
976 adapter->diag_test = test; 979 adapter->diag_test = test;
977 980
978 ret = qlcnic_attach(adapter); 981 ret = qlcnic_attach(adapter);
979 if (ret) 982 if (ret) {
983 netif_device_attach(netdev);
980 return ret; 984 return ret;
985 }
981 986
982 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) { 987 if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
983 for (ring = 0; ring < adapter->max_sds_rings; ring++) { 988 for (ring = 0; ring < adapter->max_sds_rings; ring++) {
@@ -1010,16 +1015,12 @@ qlcnic_reset_context(struct qlcnic_adapter *adapter)
1010 if (netif_running(netdev)) { 1015 if (netif_running(netdev)) {
1011 err = qlcnic_attach(adapter); 1016 err = qlcnic_attach(adapter);
1012 if (!err) 1017 if (!err)
1013 err = __qlcnic_up(adapter, netdev); 1018 __qlcnic_up(adapter, netdev);
1014
1015 if (err)
1016 goto done;
1017 } 1019 }
1018 1020
1019 netif_device_attach(netdev); 1021 netif_device_attach(netdev);
1020 } 1022 }
1021 1023
1022done:
1023 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1024 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1024 return err; 1025 return err;
1025} 1026}
@@ -1139,6 +1140,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1139 goto err_out_iounmap; 1140 goto err_out_iounmap;
1140 } 1141 }
1141 1142
1143 qlcnic_setup_idc_param(adapter);
1142 1144
1143 err = qlcnic_start_firmware(adapter); 1145 err = qlcnic_start_firmware(adapter);
1144 if (err) 1146 if (err)
@@ -1334,6 +1336,7 @@ err_out_detach:
1334 qlcnic_detach(adapter); 1336 qlcnic_detach(adapter);
1335err_out: 1337err_out:
1336 qlcnic_clr_all_drv_state(adapter); 1338 qlcnic_clr_all_drv_state(adapter);
1339 netif_device_attach(netdev);
1337 return err; 1340 return err;
1338} 1341}
1339#endif 1342#endif
@@ -1739,6 +1742,7 @@ static void qlcnic_tx_timeout_task(struct work_struct *work)
1739request_reset: 1742request_reset:
1740 adapter->need_fw_reset = 1; 1743 adapter->need_fw_reset = 1;
1741 clear_bit(__QLCNIC_RESETTING, &adapter->state); 1744 clear_bit(__QLCNIC_RESETTING, &adapter->state);
1745 QLCDB(adapter, DRV, "Resetting adapter\n");
1742} 1746}
1743 1747
1744static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev) 1748static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
@@ -2028,7 +2032,7 @@ static int
2028qlcnic_can_start_firmware(struct qlcnic_adapter *adapter) 2032qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2029{ 2033{
2030 u32 val, prev_state; 2034 u32 val, prev_state;
2031 int cnt = 0; 2035 u8 dev_init_timeo = adapter->dev_init_timeo;
2032 int portnum = adapter->portnum; 2036 int portnum = adapter->portnum;
2033 2037
2034 if (qlcnic_api_lock(adapter)) 2038 if (qlcnic_api_lock(adapter))
@@ -2043,6 +2047,7 @@ qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
2043 } 2047 }
2044 2048
2045 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2049 prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2050 QLCDB(adapter, HW, "Device state = %u\n", prev_state);
2046 2051
2047 switch (prev_state) { 2052 switch (prev_state) {
2048 case QLCNIC_DEV_COLD: 2053 case QLCNIC_DEV_COLD:
@@ -2073,13 +2078,17 @@ start_fw:
2073 } 2078 }
2074 2079
2075 qlcnic_api_unlock(adapter); 2080 qlcnic_api_unlock(adapter);
2076 msleep(1000); 2081
2077 while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) && 2082 do {
2078 ++cnt < 20)
2079 msleep(1000); 2083 msleep(1000);
2084 } while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY)
2085 && --dev_init_timeo);
2080 2086
2081 if (cnt >= 20) 2087 if (!dev_init_timeo) {
2088 dev_err(&adapter->pdev->dev,
2089 "Waiting for device to initialize timeout\n");
2082 return -1; 2090 return -1;
2091 }
2083 2092
2084 if (qlcnic_api_lock(adapter)) 2093 if (qlcnic_api_lock(adapter))
2085 return -1; 2094 return -1;
@@ -2100,17 +2109,16 @@ qlcnic_fwinit_work(struct work_struct *work)
2100 struct qlcnic_adapter, fw_work.work); 2109 struct qlcnic_adapter, fw_work.work);
2101 int dev_state; 2110 int dev_state;
2102 2111
2103 if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
2104 goto err_ret;
2105
2106 if (test_bit(__QLCNIC_START_FW, &adapter->state)) { 2112 if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
2107 2113
2108 if (qlcnic_check_drv_state(adapter)) { 2114 if (qlcnic_check_drv_state(adapter) &&
2115 (adapter->fw_wait_cnt++ < adapter->reset_ack_timeo)) {
2109 qlcnic_schedule_work(adapter, 2116 qlcnic_schedule_work(adapter,
2110 qlcnic_fwinit_work, FW_POLL_DELAY); 2117 qlcnic_fwinit_work, FW_POLL_DELAY);
2111 return; 2118 return;
2112 } 2119 }
2113 2120
2121 QLCDB(adapter, DRV, "Resetting FW\n");
2114 if (!qlcnic_start_firmware(adapter)) { 2122 if (!qlcnic_start_firmware(adapter)) {
2115 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0); 2123 qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
2116 return; 2124 return;
@@ -2119,7 +2127,15 @@ qlcnic_fwinit_work(struct work_struct *work)
2119 goto err_ret; 2127 goto err_ret;
2120 } 2128 }
2121 2129
2130 if (adapter->fw_wait_cnt++ > (adapter->dev_init_timeo / 2)) {
2131 dev_err(&adapter->pdev->dev,
2132 "Waiting for device to reset timeout\n");
2133 goto err_ret;
2134 }
2135
2122 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE); 2136 dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2137 QLCDB(adapter, HW, "Func waiting: Device state=%d\n", dev_state);
2138
2123 switch (dev_state) { 2139 switch (dev_state) {
2124 case QLCNIC_DEV_READY: 2140 case QLCNIC_DEV_READY:
2125 if (!qlcnic_start_firmware(adapter)) { 2141 if (!qlcnic_start_firmware(adapter)) {
@@ -2136,6 +2152,7 @@ qlcnic_fwinit_work(struct work_struct *work)
2136 } 2152 }
2137 2153
2138err_ret: 2154err_ret:
2155 netif_device_attach(adapter->netdev);
2139 qlcnic_clr_all_drv_state(adapter); 2156 qlcnic_clr_all_drv_state(adapter);
2140} 2157}
2141 2158
@@ -2172,6 +2189,9 @@ qlcnic_detach_work(struct work_struct *work)
2172 return; 2189 return;
2173 2190
2174err_ret: 2191err_ret:
2192 dev_err(&adapter->pdev->dev, "detach failed; status=%d temp=%d\n",
2193 status, adapter->temp);
2194 netif_device_attach(netdev);
2175 qlcnic_clr_all_drv_state(adapter); 2195 qlcnic_clr_all_drv_state(adapter);
2176 2196
2177} 2197}
@@ -2189,6 +2209,7 @@ qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
2189 if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) { 2209 if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
2190 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET); 2210 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
2191 set_bit(__QLCNIC_START_FW, &adapter->state); 2211 set_bit(__QLCNIC_START_FW, &adapter->state);
2212 QLCDB(adapter, DRV, "NEED_RESET state set\n");
2192 } 2213 }
2193 2214
2194 qlcnic_api_unlock(adapter); 2215 qlcnic_api_unlock(adapter);
@@ -2233,9 +2254,8 @@ qlcnic_attach_work(struct work_struct *work)
2233 qlcnic_config_indev_addr(netdev, NETDEV_UP); 2254 qlcnic_config_indev_addr(netdev, NETDEV_UP);
2234 } 2255 }
2235 2256
2236 netif_device_attach(netdev);
2237
2238done: 2257done:
2258 netif_device_attach(netdev);
2239 adapter->fw_fail_cnt = 0; 2259 adapter->fw_fail_cnt = 0;
2240 clear_bit(__QLCNIC_RESETTING, &adapter->state); 2260 clear_bit(__QLCNIC_RESETTING, &adapter->state);
2241 2261
@@ -2285,8 +2305,11 @@ detach:
2285 QLCNIC_DEV_NEED_RESET; 2305 QLCNIC_DEV_NEED_RESET;
2286 2306
2287 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) && 2307 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2288 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) 2308 !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state)) {
2309
2289 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0); 2310 qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
2311 QLCDB(adapter, DRV, "fw recovery scheduled.\n");
2312 }
2290 2313
2291 return 1; 2314 return 1;
2292} 2315}
@@ -2387,14 +2410,21 @@ static int
2387qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter, 2410qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
2388 loff_t offset, size_t size) 2411 loff_t offset, size_t size)
2389{ 2412{
2413 size_t crb_size = 4;
2414
2390 if (!(adapter->flags & QLCNIC_DIAG_ENABLED)) 2415 if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
2391 return -EIO; 2416 return -EIO;
2392 2417
2393 if ((size != 4) || (offset & 0x3)) 2418 if (offset < QLCNIC_PCI_CRBSPACE) {
2394 return -EINVAL; 2419 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM,
2420 QLCNIC_PCI_CAMQM_END))
2421 crb_size = 8;
2422 else
2423 return -EINVAL;
2424 }
2395 2425
2396 if (offset < QLCNIC_PCI_CRBSPACE) 2426 if ((size != crb_size) || (offset & (crb_size-1)))
2397 return -EINVAL; 2427 return -EINVAL;
2398 2428
2399 return 0; 2429 return 0;
2400} 2430}
@@ -2406,14 +2436,20 @@ qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2406 struct device *dev = container_of(kobj, struct device, kobj); 2436 struct device *dev = container_of(kobj, struct device, kobj);
2407 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 2437 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2408 u32 data; 2438 u32 data;
2439 u64 qmdata;
2409 int ret; 2440 int ret;
2410 2441
2411 ret = qlcnic_sysfs_validate_crb(adapter, offset, size); 2442 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2412 if (ret != 0) 2443 if (ret != 0)
2413 return ret; 2444 return ret;
2414 2445
2415 data = QLCRD32(adapter, offset); 2446 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2416 memcpy(buf, &data, size); 2447 qlcnic_pci_camqm_read_2M(adapter, offset, &qmdata);
2448 memcpy(buf, &qmdata, size);
2449 } else {
2450 data = QLCRD32(adapter, offset);
2451 memcpy(buf, &data, size);
2452 }
2417 return size; 2453 return size;
2418} 2454}
2419 2455
@@ -2424,14 +2460,20 @@ qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
2424 struct device *dev = container_of(kobj, struct device, kobj); 2460 struct device *dev = container_of(kobj, struct device, kobj);
2425 struct qlcnic_adapter *adapter = dev_get_drvdata(dev); 2461 struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
2426 u32 data; 2462 u32 data;
2463 u64 qmdata;
2427 int ret; 2464 int ret;
2428 2465
2429 ret = qlcnic_sysfs_validate_crb(adapter, offset, size); 2466 ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
2430 if (ret != 0) 2467 if (ret != 0)
2431 return ret; 2468 return ret;
2432 2469
2433 memcpy(&data, buf, size); 2470 if (ADDR_IN_RANGE(offset, QLCNIC_PCI_CAMQM, QLCNIC_PCI_CAMQM_END)) {
2434 QLCWR32(adapter, offset, data); 2471 memcpy(&qmdata, buf, size);
2472 qlcnic_pci_camqm_write_2M(adapter, offset, qmdata);
2473 } else {
2474 memcpy(&data, buf, size);
2475 QLCWR32(adapter, offset, data);
2476 }
2435 return size; 2477 return size;
2436} 2478}
2437 2479
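
The qlcnic_main.c changes thread the ROM-derived timeouts through the bring-up paths (replacing a hard-coded 20 x 1s poll), propagate real error codes out of qlcnic_can_start_firmware(), re-attach the netdev on every failure path so the interface is not left dead, and route 8-byte CAMQM offsets in the sysfs CRB files through the new camqm helpers. The polling idiom the patch settles on, consolidated from the hunk above: sleep first, then re-check, with a decrementing budget whose exhaustion signals timeout:

    do {
            msleep(1000);
    } while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY)
             && --dev_init_timeo);

    if (!dev_init_timeo) {
            dev_err(&adapter->pdev->dev,
                    "Waiting for device to initialize timeout\n");
            return -1;
    }

The do/while guarantees at least one sleep before the first register read, and the post-loop test makes a budget of zero an unambiguous timeout indication.
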
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 8b742b639ceb..20624ba44a37 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -1344,8 +1344,8 @@ struct oal {
1344}; 1344};
1345 1345
1346struct map_list { 1346struct map_list {
1347 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1347 DEFINE_DMA_UNMAP_ADDR(mapaddr);
1348 DECLARE_PCI_UNMAP_LEN(maplen); 1348 DEFINE_DMA_UNMAP_LEN(maplen);
1349}; 1349};
1350 1350
1351struct tx_ring_desc { 1351struct tx_ring_desc {
@@ -1373,8 +1373,8 @@ struct bq_desc {
1373 } p; 1373 } p;
1374 __le64 *addr; 1374 __le64 *addr;
1375 u32 index; 1375 u32 index;
1376 DECLARE_PCI_UNMAP_ADDR(mapaddr); 1376 DEFINE_DMA_UNMAP_ADDR(mapaddr);
1377 DECLARE_PCI_UNMAP_LEN(maplen); 1377 DEFINE_DMA_UNMAP_LEN(maplen);
1378}; 1378};
1379 1379
1380#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count)) 1380#define QL_TXQ_IDX(qdev, skb) (smp_processor_id()%(qdev->tx_ring_count))
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 362664628937..68a1c9b91e74 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1340,7 +1340,7 @@ void ql_mpi_core_to_log(struct work_struct *work)
1340 1340
1341 for (i = 0; i < count; i += 8) { 1341 for (i = 0; i < count; i += 8) {
1342 printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x " 1342 printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
1343 "%.08x %.08x %.08x \n", i, 1343 "%.08x %.08x %.08x\n", i,
1344 tmp[i + 0], 1344 tmp[i + 0],
1345 tmp[i + 1], 1345 tmp[i + 1],
1346 tmp[i + 2], 1346 tmp[i + 2],
@@ -2058,7 +2058,7 @@ void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb)
2058 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "", 2058 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_LSO ? "LSO" : "",
2059 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "", 2059 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_UC ? "UC" : "",
2060 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : ""); 2060 ob_mac_tso_iocb->flags2 & OB_MAC_TSO_IOCB_TC ? "TC" : "");
2061 printk(KERN_ERR PFX "flags3 = %s %s %s \n", 2061 printk(KERN_ERR PFX "flags3 = %s %s %s\n",
2062 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "", 2062 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_IC ? "IC" : "",
2063 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "", 2063 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_DFP ? "DFP" : "",
2064 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : ""); 2064 ob_mac_tso_iocb->flags3 & OB_MAC_TSO_IOCB_V ? "V" : "");
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index fd34f266c0a8..fa4b24c49f42 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -1057,7 +1057,7 @@ static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1057 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring); 1057 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1058 1058
1059 pci_dma_sync_single_for_cpu(qdev->pdev, 1059 pci_dma_sync_single_for_cpu(qdev->pdev,
1060 pci_unmap_addr(lbq_desc, mapaddr), 1060 dma_unmap_addr(lbq_desc, mapaddr),
1061 rx_ring->lbq_buf_size, 1061 rx_ring->lbq_buf_size,
1062 PCI_DMA_FROMDEVICE); 1062 PCI_DMA_FROMDEVICE);
1063 1063
@@ -1170,8 +1170,8 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1170 1170
1171 map = lbq_desc->p.pg_chunk.map + 1171 map = lbq_desc->p.pg_chunk.map +
1172 lbq_desc->p.pg_chunk.offset; 1172 lbq_desc->p.pg_chunk.offset;
1173 pci_unmap_addr_set(lbq_desc, mapaddr, map); 1173 dma_unmap_addr_set(lbq_desc, mapaddr, map);
1174 pci_unmap_len_set(lbq_desc, maplen, 1174 dma_unmap_len_set(lbq_desc, maplen,
1175 rx_ring->lbq_buf_size); 1175 rx_ring->lbq_buf_size);
1176 *lbq_desc->addr = cpu_to_le64(map); 1176 *lbq_desc->addr = cpu_to_le64(map);
1177 1177
@@ -1241,8 +1241,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1241 sbq_desc->p.skb = NULL; 1241 sbq_desc->p.skb = NULL;
1242 return; 1242 return;
1243 } 1243 }
1244 pci_unmap_addr_set(sbq_desc, mapaddr, map); 1244 dma_unmap_addr_set(sbq_desc, mapaddr, map);
1245 pci_unmap_len_set(sbq_desc, maplen, 1245 dma_unmap_len_set(sbq_desc, maplen,
1246 rx_ring->sbq_buf_size); 1246 rx_ring->sbq_buf_size);
1247 *sbq_desc->addr = cpu_to_le64(map); 1247 *sbq_desc->addr = cpu_to_le64(map);
1248 } 1248 }
@@ -1298,18 +1298,18 @@ static void ql_unmap_send(struct ql_adapter *qdev,
1298 "unmapping OAL area.\n"); 1298 "unmapping OAL area.\n");
1299 } 1299 }
1300 pci_unmap_single(qdev->pdev, 1300 pci_unmap_single(qdev->pdev,
1301 pci_unmap_addr(&tx_ring_desc->map[i], 1301 dma_unmap_addr(&tx_ring_desc->map[i],
1302 mapaddr), 1302 mapaddr),
1303 pci_unmap_len(&tx_ring_desc->map[i], 1303 dma_unmap_len(&tx_ring_desc->map[i],
1304 maplen), 1304 maplen),
1305 PCI_DMA_TODEVICE); 1305 PCI_DMA_TODEVICE);
1306 } else { 1306 } else {
1307 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev, 1307 netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1308 "unmapping frag %d.\n", i); 1308 "unmapping frag %d.\n", i);
1309 pci_unmap_page(qdev->pdev, 1309 pci_unmap_page(qdev->pdev,
1310 pci_unmap_addr(&tx_ring_desc->map[i], 1310 dma_unmap_addr(&tx_ring_desc->map[i],
1311 mapaddr), 1311 mapaddr),
1312 pci_unmap_len(&tx_ring_desc->map[i], 1312 dma_unmap_len(&tx_ring_desc->map[i],
1313 maplen), PCI_DMA_TODEVICE); 1313 maplen), PCI_DMA_TODEVICE);
1314 } 1314 }
1315 } 1315 }
@@ -1348,8 +1348,8 @@ static int ql_map_send(struct ql_adapter *qdev,
1348 1348
1349 tbd->len = cpu_to_le32(len); 1349 tbd->len = cpu_to_le32(len);
1350 tbd->addr = cpu_to_le64(map); 1350 tbd->addr = cpu_to_le64(map);
1351 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); 1351 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1352 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len); 1352 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1353 map_idx++; 1353 map_idx++;
1354 1354
1355 /* 1355 /*
@@ -1402,9 +1402,9 @@ static int ql_map_send(struct ql_adapter *qdev,
1402 tbd->len = 1402 tbd->len =
1403 cpu_to_le32((sizeof(struct tx_buf_desc) * 1403 cpu_to_le32((sizeof(struct tx_buf_desc) *
1404 (frag_cnt - frag_idx)) | TX_DESC_C); 1404 (frag_cnt - frag_idx)) | TX_DESC_C);
1405 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, 1405 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1406 map); 1406 map);
1407 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, 1407 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1408 sizeof(struct oal)); 1408 sizeof(struct oal));
1409 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal; 1409 tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1410 map_idx++; 1410 map_idx++;
@@ -1425,8 +1425,8 @@ static int ql_map_send(struct ql_adapter *qdev,
1425 1425
1426 tbd->addr = cpu_to_le64(map); 1426 tbd->addr = cpu_to_le64(map);
1427 tbd->len = cpu_to_le32(frag->size); 1427 tbd->len = cpu_to_le32(frag->size);
1428 pci_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map); 1428 dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1429 pci_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, 1429 dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1430 frag->size); 1430 frag->size);
1431 1431
1432 } 1432 }
@@ -1742,8 +1742,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1742 */ 1742 */
1743 sbq_desc = ql_get_curr_sbuf(rx_ring); 1743 sbq_desc = ql_get_curr_sbuf(rx_ring);
1744 pci_unmap_single(qdev->pdev, 1744 pci_unmap_single(qdev->pdev,
1745 pci_unmap_addr(sbq_desc, mapaddr), 1745 dma_unmap_addr(sbq_desc, mapaddr),
1746 pci_unmap_len(sbq_desc, maplen), 1746 dma_unmap_len(sbq_desc, maplen),
1747 PCI_DMA_FROMDEVICE); 1747 PCI_DMA_FROMDEVICE);
1748 skb = sbq_desc->p.skb; 1748 skb = sbq_desc->p.skb;
1749 ql_realign_skb(skb, hdr_len); 1749 ql_realign_skb(skb, hdr_len);
@@ -1774,18 +1774,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1774 */ 1774 */
1775 sbq_desc = ql_get_curr_sbuf(rx_ring); 1775 sbq_desc = ql_get_curr_sbuf(rx_ring);
1776 pci_dma_sync_single_for_cpu(qdev->pdev, 1776 pci_dma_sync_single_for_cpu(qdev->pdev,
1777 pci_unmap_addr 1777 dma_unmap_addr
1778 (sbq_desc, mapaddr), 1778 (sbq_desc, mapaddr),
1779 pci_unmap_len 1779 dma_unmap_len
1780 (sbq_desc, maplen), 1780 (sbq_desc, maplen),
1781 PCI_DMA_FROMDEVICE); 1781 PCI_DMA_FROMDEVICE);
1782 memcpy(skb_put(skb, length), 1782 memcpy(skb_put(skb, length),
1783 sbq_desc->p.skb->data, length); 1783 sbq_desc->p.skb->data, length);
1784 pci_dma_sync_single_for_device(qdev->pdev, 1784 pci_dma_sync_single_for_device(qdev->pdev,
1785 pci_unmap_addr 1785 dma_unmap_addr
1786 (sbq_desc, 1786 (sbq_desc,
1787 mapaddr), 1787 mapaddr),
1788 pci_unmap_len 1788 dma_unmap_len
1789 (sbq_desc, 1789 (sbq_desc,
1790 maplen), 1790 maplen),
1791 PCI_DMA_FROMDEVICE); 1791 PCI_DMA_FROMDEVICE);
@@ -1798,9 +1798,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1798 ql_realign_skb(skb, length); 1798 ql_realign_skb(skb, length);
1799 skb_put(skb, length); 1799 skb_put(skb, length);
1800 pci_unmap_single(qdev->pdev, 1800 pci_unmap_single(qdev->pdev,
1801 pci_unmap_addr(sbq_desc, 1801 dma_unmap_addr(sbq_desc,
1802 mapaddr), 1802 mapaddr),
1803 pci_unmap_len(sbq_desc, 1803 dma_unmap_len(sbq_desc,
1804 maplen), 1804 maplen),
1805 PCI_DMA_FROMDEVICE); 1805 PCI_DMA_FROMDEVICE);
1806 sbq_desc->p.skb = NULL; 1806 sbq_desc->p.skb = NULL;
@@ -1839,9 +1839,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1839 return NULL; 1839 return NULL;
1840 } 1840 }
1841 pci_unmap_page(qdev->pdev, 1841 pci_unmap_page(qdev->pdev,
1842 pci_unmap_addr(lbq_desc, 1842 dma_unmap_addr(lbq_desc,
1843 mapaddr), 1843 mapaddr),
1844 pci_unmap_len(lbq_desc, maplen), 1844 dma_unmap_len(lbq_desc, maplen),
1845 PCI_DMA_FROMDEVICE); 1845 PCI_DMA_FROMDEVICE);
1846 skb_reserve(skb, NET_IP_ALIGN); 1846 skb_reserve(skb, NET_IP_ALIGN);
1847 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, 1847 netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
@@ -1874,8 +1874,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1874 int size, i = 0; 1874 int size, i = 0;
1875 sbq_desc = ql_get_curr_sbuf(rx_ring); 1875 sbq_desc = ql_get_curr_sbuf(rx_ring);
1876 pci_unmap_single(qdev->pdev, 1876 pci_unmap_single(qdev->pdev,
1877 pci_unmap_addr(sbq_desc, mapaddr), 1877 dma_unmap_addr(sbq_desc, mapaddr),
1878 pci_unmap_len(sbq_desc, maplen), 1878 dma_unmap_len(sbq_desc, maplen),
1879 PCI_DMA_FROMDEVICE); 1879 PCI_DMA_FROMDEVICE);
1880 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) { 1880 if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1881 /* 1881 /*
@@ -2737,8 +2737,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
2737 } 2737 }
2738 if (sbq_desc->p.skb) { 2738 if (sbq_desc->p.skb) {
2739 pci_unmap_single(qdev->pdev, 2739 pci_unmap_single(qdev->pdev,
2740 pci_unmap_addr(sbq_desc, mapaddr), 2740 dma_unmap_addr(sbq_desc, mapaddr),
2741 pci_unmap_len(sbq_desc, maplen), 2741 dma_unmap_len(sbq_desc, maplen),
2742 PCI_DMA_FROMDEVICE); 2742 PCI_DMA_FROMDEVICE);
2743 dev_kfree_skb(sbq_desc->p.skb); 2743 dev_kfree_skb(sbq_desc->p.skb);
2744 sbq_desc->p.skb = NULL; 2744 sbq_desc->p.skb = NULL;
@@ -4207,7 +4207,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
4207static void qlge_set_multicast_list(struct net_device *ndev) 4207static void qlge_set_multicast_list(struct net_device *ndev)
4208{ 4208{
4209 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4209 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
4210 struct dev_mc_list *mc_ptr; 4210 struct netdev_hw_addr *ha;
4211 int i, status; 4211 int i, status;
4212 4212
4213 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK); 4213 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
@@ -4271,8 +4271,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
4271 if (status) 4271 if (status)
4272 goto exit; 4272 goto exit;
4273 i = 0; 4273 i = 0;
4274 netdev_for_each_mc_addr(mc_ptr, ndev) { 4274 netdev_for_each_mc_addr(ha, ndev) {
4275 if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr, 4275 if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4276 MAC_ADDR_TYPE_MULTI_MAC, i)) { 4276 MAC_ADDR_TYPE_MULTI_MAC, i)) {
4277 netif_err(qdev, hw, qdev->ndev, 4277 netif_err(qdev, hw, qdev->ndev,
4278 "Failed to loadmulticast address.\n"); 4278 "Failed to loadmulticast address.\n");
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 0298d8c1dcb6..412291645596 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -330,7 +330,7 @@ static int r6040_alloc_rxbufs(struct net_device *dev)
330 do { 330 do {
331 skb = netdev_alloc_skb(dev, MAX_BUF_SIZE); 331 skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
332 if (!skb) { 332 if (!skb) {
333 printk(KERN_ERR DRV_NAME "%s: failed to alloc skb for rx\n", dev->name); 333 netdev_err(dev, "failed to alloc skb for rx\n");
334 rc = -ENOMEM; 334 rc = -ENOMEM;
335 goto err_exit; 335 goto err_exit;
336 } 336 }
@@ -410,9 +410,9 @@ static void r6040_tx_timeout(struct net_device *dev)
410 struct r6040_private *priv = netdev_priv(dev); 410 struct r6040_private *priv = netdev_priv(dev);
411 void __iomem *ioaddr = priv->base; 411 void __iomem *ioaddr = priv->base;
412 412
413 printk(KERN_WARNING "%s: transmit timed out, int enable %4.4x " 413 netdev_warn(dev, "transmit timed out, int enable %4.4x "
414 "status %4.4x, PHY status %4.4x\n", 414 "status %4.4x, PHY status %4.4x\n",
415 dev->name, ioread16(ioaddr + MIER), 415 ioread16(ioaddr + MIER),
416 ioread16(ioaddr + MISR), 416 ioread16(ioaddr + MISR),
417 r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR)); 417 r6040_mdio_read(dev, priv->mii_if.phy_id, MII_BMSR));
418 418
@@ -897,7 +897,7 @@ static netdev_tx_t r6040_start_xmit(struct sk_buff *skb,
897 if (!lp->tx_free_desc) { 897 if (!lp->tx_free_desc) {
898 spin_unlock_irqrestore(&lp->lock, flags); 898 spin_unlock_irqrestore(&lp->lock, flags);
899 netif_stop_queue(dev); 899 netif_stop_queue(dev);
900 printk(KERN_ERR DRV_NAME ": no tx descriptor\n"); 900 netdev_err(dev, ": no tx descriptor\n");
901 return NETDEV_TX_BUSY; 901 return NETDEV_TX_BUSY;
902 } 902 }
903 903
@@ -937,7 +937,7 @@ static void r6040_multicast_list(struct net_device *dev)
937 u16 *adrp; 937 u16 *adrp;
938 u16 reg; 938 u16 reg;
939 unsigned long flags; 939 unsigned long flags;
940 struct dev_mc_list *dmi; 940 struct netdev_hw_addr *ha;
941 int i; 941 int i;
942 942
943 /* MAC Address */ 943 /* MAC Address */
@@ -972,8 +972,8 @@ static void r6040_multicast_list(struct net_device *dev)
972 for (i = 0; i < 4; i++) 972 for (i = 0; i < 4; i++)
973 hash_table[i] = 0; 973 hash_table[i] = 0;
974 974
975 netdev_for_each_mc_addr(dmi, dev) { 975 netdev_for_each_mc_addr(ha, dev) {
976 char *addrs = dmi->dmi_addr; 976 char *addrs = ha->addr;
977 977
978 if (!(*addrs & 1)) 978 if (!(*addrs & 1))
979 continue; 979 continue;
@@ -990,9 +990,9 @@ static void r6040_multicast_list(struct net_device *dev)
990 } 990 }
991 /* Multicast Address 1~4 case */ 991 /* Multicast Address 1~4 case */
992 i = 0; 992 i = 0;
993 netdev_for_each_mc_addr(dmi, dev) { 993 netdev_for_each_mc_addr(ha, dev) {
994 if (i < MCAST_MAX) { 994 if (i < MCAST_MAX) {
995 adrp = (u16 *) dmi->dmi_addr; 995 adrp = (u16 *) ha->addr;
996 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); 996 iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
997 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); 997 iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
998 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); 998 iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
@@ -1090,20 +1090,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1090 /* this should always be supported */ 1090 /* this should always be supported */
1091 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 1091 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1092 if (err) { 1092 if (err) {
1093 printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses" 1093 dev_err(&pdev->dev, "32-bit PCI DMA addresses"
1094 "not supported by the card\n"); 1094 "not supported by the card\n");
1095 goto err_out; 1095 goto err_out;
1096 } 1096 }
1097 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1097 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1098 if (err) { 1098 if (err) {
1099 printk(KERN_ERR DRV_NAME ": 32-bit PCI DMA addresses" 1099 dev_err(&pdev->dev, "32-bit PCI DMA addresses"
1100 "not supported by the card\n"); 1100 "not supported by the card\n");
1101 goto err_out; 1101 goto err_out;
1102 } 1102 }
1103 1103
1104 /* IO Size check */ 1104 /* IO Size check */
1105 if (pci_resource_len(pdev, bar) < io_size) { 1105 if (pci_resource_len(pdev, bar) < io_size) {
1106 printk(KERN_ERR DRV_NAME ": Insufficient PCI resources, aborting\n"); 1106 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
1107 err = -EIO; 1107 err = -EIO;
1108 goto err_out; 1108 goto err_out;
1109 } 1109 }
@@ -1112,7 +1112,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1112 1112
1113 dev = alloc_etherdev(sizeof(struct r6040_private)); 1113 dev = alloc_etherdev(sizeof(struct r6040_private));
1114 if (!dev) { 1114 if (!dev) {
1115 printk(KERN_ERR DRV_NAME ": Failed to allocate etherdev\n"); 1115 dev_err(&pdev->dev, "Failed to allocate etherdev\n");
1116 err = -ENOMEM; 1116 err = -ENOMEM;
1117 goto err_out; 1117 goto err_out;
1118 } 1118 }
@@ -1122,14 +1122,13 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1122 err = pci_request_regions(pdev, DRV_NAME); 1122 err = pci_request_regions(pdev, DRV_NAME);
1123 1123
1124 if (err) { 1124 if (err) {
1125 printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n"); 1125 dev_err(&pdev->dev, "Failed to request PCI regions\n");
1126 goto err_out_free_dev; 1126 goto err_out_free_dev;
1127 } 1127 }
1128 1128
1129 ioaddr = pci_iomap(pdev, bar, io_size); 1129 ioaddr = pci_iomap(pdev, bar, io_size);
1130 if (!ioaddr) { 1130 if (!ioaddr) {
1131 printk(KERN_ERR DRV_NAME ": ioremap failed for device %s\n", 1131 dev_err(&pdev->dev, "ioremap failed for device\n");
1132 pci_name(pdev));
1133 err = -EIO; 1132 err = -EIO;
1134 goto err_out_free_res; 1133 goto err_out_free_res;
1135 } 1134 }
@@ -1156,7 +1155,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1156 /* Some bootloader/BIOSes do not initialize 1155 /* Some bootloader/BIOSes do not initialize
1157 * MAC address, warn about that */ 1156 * MAC address, warn about that */
1158 if (!(adrp[0] || adrp[1] || adrp[2])) { 1157 if (!(adrp[0] || adrp[1] || adrp[2])) {
1159 printk(KERN_WARNING DRV_NAME ": MAC address not initialized, generating random\n"); 1158 netdev_warn(dev, "MAC address not initialized, generating random\n");
1160 random_ether_addr(dev->dev_addr); 1159 random_ether_addr(dev->dev_addr);
1161 } 1160 }
1162 1161
@@ -1184,7 +1183,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1184 1183
1185 /* Check the vendor ID on the PHY, if 0xffff assume none attached */ 1184 /* Check the vendor ID on the PHY, if 0xffff assume none attached */
1186 if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) { 1185 if (r6040_phy_read(ioaddr, lp->phy_addr, 2) == 0xffff) {
1187 printk(KERN_ERR DRV_NAME ": Failed to detect an attached PHY\n"); 1186 dev_err(&pdev->dev, "Failed to detect an attached PHY\n");
1188 err = -ENODEV; 1187 err = -ENODEV;
1189 goto err_out_unmap; 1188 goto err_out_unmap;
1190 } 1189 }
@@ -1192,7 +1191,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1192 /* Register net device. After this, dev->name is assigned */ 1191 /* Register net device. After this, dev->name is assigned */
1193 err = register_netdev(dev); 1192 err = register_netdev(dev);
1194 if (err) { 1193 if (err) {
1195 printk(KERN_ERR DRV_NAME ": Failed to register net device\n"); 1194 dev_err(&pdev->dev, "Failed to register net device\n");
1196 goto err_out_unmap; 1195 goto err_out_unmap;
1197 } 1196 }
1198 return 0; 1197 return 0;
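
Taken together, the r6040 hunks above replace hand-rolled printk(KERN_ERR DRV_NAME ...) calls with the dev_err()/netdev_err() helpers, which prepend the driver and device (or network interface) name automatically. A minimal sketch of the resulting convention, with hypothetical function names:

#include <linux/netdevice.h>
#include <linux/pci.h>

/* Probe-time: the net_device has no name yet, so key off the PCI dev. */
static int example_probe_step(struct pci_dev *pdev)
{
	dev_err(&pdev->dev, "Failed to request PCI regions\n");
	return -EIO;
}

/* After register_netdev(): messages carry the interface name instead. */
static void example_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timed out\n");
}
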
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index dbb1f5a1824c..340da3915b96 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -23,6 +23,7 @@
23#include <linux/tcp.h> 23#include <linux/tcp.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/pm_runtime.h>
26 27
27#include <asm/system.h> 28#include <asm/system.h>
28#include <asm/io.h> 29#include <asm/io.h>
@@ -509,6 +510,7 @@ struct rtl8169_private {
509 510
510 struct mii_if_info mii; 511 struct mii_if_info mii;
511 struct rtl8169_counters counters; 512 struct rtl8169_counters counters;
513 u32 saved_wolopts;
512}; 514};
513 515
514MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 516MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -748,53 +750,61 @@ static void rtl8169_check_link_status(struct net_device *dev,
748 750
749 spin_lock_irqsave(&tp->lock, flags); 751 spin_lock_irqsave(&tp->lock, flags);
750 if (tp->link_ok(ioaddr)) { 752 if (tp->link_ok(ioaddr)) {
753 /* This is to cancel a scheduled suspend if there's one. */
754 pm_request_resume(&tp->pci_dev->dev);
751 netif_carrier_on(dev); 755 netif_carrier_on(dev);
752 netif_info(tp, ifup, dev, "link up\n"); 756 netif_info(tp, ifup, dev, "link up\n");
753 } else { 757 } else {
754 netif_carrier_off(dev); 758 netif_carrier_off(dev);
755 netif_info(tp, ifdown, dev, "link down\n"); 759 netif_info(tp, ifdown, dev, "link down\n");
760 pm_schedule_suspend(&tp->pci_dev->dev, 100);
756 } 761 }
757 spin_unlock_irqrestore(&tp->lock, flags); 762 spin_unlock_irqrestore(&tp->lock, flags);
758} 763}
759 764
760static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 765#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
766
767static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
761{ 768{
762 struct rtl8169_private *tp = netdev_priv(dev);
763 void __iomem *ioaddr = tp->mmio_addr; 769 void __iomem *ioaddr = tp->mmio_addr;
764 u8 options; 770 u8 options;
765 771 u32 wolopts = 0;
766 wol->wolopts = 0;
767
768#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
769 wol->supported = WAKE_ANY;
770
771 spin_lock_irq(&tp->lock);
772 772
773 options = RTL_R8(Config1); 773 options = RTL_R8(Config1);
774 if (!(options & PMEnable)) 774 if (!(options & PMEnable))
775 goto out_unlock; 775 return 0;
776 776
777 options = RTL_R8(Config3); 777 options = RTL_R8(Config3);
778 if (options & LinkUp) 778 if (options & LinkUp)
779 wol->wolopts |= WAKE_PHY; 779 wolopts |= WAKE_PHY;
780 if (options & MagicPacket) 780 if (options & MagicPacket)
781 wol->wolopts |= WAKE_MAGIC; 781 wolopts |= WAKE_MAGIC;
782 782
783 options = RTL_R8(Config5); 783 options = RTL_R8(Config5);
784 if (options & UWF) 784 if (options & UWF)
785 wol->wolopts |= WAKE_UCAST; 785 wolopts |= WAKE_UCAST;
786 if (options & BWF) 786 if (options & BWF)
787 wol->wolopts |= WAKE_BCAST; 787 wolopts |= WAKE_BCAST;
788 if (options & MWF) 788 if (options & MWF)
789 wol->wolopts |= WAKE_MCAST; 789 wolopts |= WAKE_MCAST;
790 790
791out_unlock: 791 return wolopts;
792 spin_unlock_irq(&tp->lock);
793} 792}
794 793
795static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 794static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
796{ 795{
797 struct rtl8169_private *tp = netdev_priv(dev); 796 struct rtl8169_private *tp = netdev_priv(dev);
797
798 spin_lock_irq(&tp->lock);
799
800 wol->supported = WAKE_ANY;
801 wol->wolopts = __rtl8169_get_wol(tp);
802
803 spin_unlock_irq(&tp->lock);
804}
805
806static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
807{
798 void __iomem *ioaddr = tp->mmio_addr; 808 void __iomem *ioaddr = tp->mmio_addr;
799 unsigned int i; 809 unsigned int i;
800 static const struct { 810 static const struct {
@@ -811,23 +821,29 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
811 { WAKE_ANY, Config5, LanWake } 821 { WAKE_ANY, Config5, LanWake }
812 }; 822 };
813 823
814 spin_lock_irq(&tp->lock);
815
816 RTL_W8(Cfg9346, Cfg9346_Unlock); 824 RTL_W8(Cfg9346, Cfg9346_Unlock);
817 825
818 for (i = 0; i < ARRAY_SIZE(cfg); i++) { 826 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
819 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask; 827 u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
820 if (wol->wolopts & cfg[i].opt) 828 if (wolopts & cfg[i].opt)
821 options |= cfg[i].mask; 829 options |= cfg[i].mask;
822 RTL_W8(cfg[i].reg, options); 830 RTL_W8(cfg[i].reg, options);
823 } 831 }
824 832
825 RTL_W8(Cfg9346, Cfg9346_Lock); 833 RTL_W8(Cfg9346, Cfg9346_Lock);
834}
835
836static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
837{
838 struct rtl8169_private *tp = netdev_priv(dev);
839
840 spin_lock_irq(&tp->lock);
826 841
827 if (wol->wolopts) 842 if (wol->wolopts)
828 tp->features |= RTL_FEATURE_WOL; 843 tp->features |= RTL_FEATURE_WOL;
829 else 844 else
830 tp->features &= ~RTL_FEATURE_WOL; 845 tp->features &= ~RTL_FEATURE_WOL;
846 __rtl8169_set_wol(tp, wol->wolopts);
831 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts); 847 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
832 848
833 spin_unlock_irq(&tp->lock); 849 spin_unlock_irq(&tp->lock);
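
The refactoring above splits the WoL accessors into lock-free __rtl8169_get_wol()/__rtl8169_set_wol() helpers and keeps the spin_lock_irq() in the ethtool entry points, so the runtime-PM callbacks added further down can reuse the same helpers under the same lock. A condensed sketch of the suspend-side calling pattern (the wrapper name is hypothetical; the body mirrors rtl8169_runtime_suspend() below):

static void example_arm_wol_for_runtime_suspend(struct rtl8169_private *tp)
{
	spin_lock_irq(&tp->lock);
	tp->saved_wolopts = __rtl8169_get_wol(tp);	/* remember user setting */
	__rtl8169_set_wol(tp, WAKE_ANY);		/* arm every wake source */
	spin_unlock_irq(&tp->lock);
}
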
@@ -1042,14 +1058,14 @@ static void rtl8169_vlan_rx_register(struct net_device *dev,
1042} 1058}
1043 1059
1044static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, 1060static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1045 struct sk_buff *skb) 1061 struct sk_buff *skb, int polling)
1046{ 1062{
1047 u32 opts2 = le32_to_cpu(desc->opts2); 1063 u32 opts2 = le32_to_cpu(desc->opts2);
1048 struct vlan_group *vlgrp = tp->vlgrp; 1064 struct vlan_group *vlgrp = tp->vlgrp;
1049 int ret; 1065 int ret;
1050 1066
1051 if (vlgrp && (opts2 & RxVlanTag)) { 1067 if (vlgrp && (opts2 & RxVlanTag)) {
1052 vlan_hwaccel_receive_skb(skb, vlgrp, swab16(opts2 & 0xffff)); 1068 __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling);
1053 ret = 0; 1069 ret = 0;
1054 } else 1070 } else
1055 ret = -1; 1071 ret = -1;
@@ -1066,7 +1082,7 @@ static inline u32 rtl8169_tx_vlan_tag(struct rtl8169_private *tp,
1066} 1082}
1067 1083
1068static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc, 1084static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
1069 struct sk_buff *skb) 1085 struct sk_buff *skb, int polling)
1070{ 1086{
1071 return -1; 1087 return -1;
1072} 1088}
@@ -3187,6 +3203,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3187 3203
3188 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3204 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
3189 3205
3206 if (pci_dev_run_wake(pdev)) {
3207 pm_runtime_set_active(&pdev->dev);
3208 pm_runtime_enable(&pdev->dev);
3209 }
3210 pm_runtime_idle(&pdev->dev);
3211
3190out: 3212out:
3191 return rc; 3213 return rc;
3192 3214
@@ -3209,10 +3231,18 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3209 struct net_device *dev = pci_get_drvdata(pdev); 3231 struct net_device *dev = pci_get_drvdata(pdev);
3210 struct rtl8169_private *tp = netdev_priv(dev); 3232 struct rtl8169_private *tp = netdev_priv(dev);
3211 3233
3234 pm_runtime_get_sync(&pdev->dev);
3235
3212 flush_scheduled_work(); 3236 flush_scheduled_work();
3213 3237
3214 unregister_netdev(dev); 3238 unregister_netdev(dev);
3215 3239
3240 if (pci_dev_run_wake(pdev)) {
3241 pm_runtime_disable(&pdev->dev);
3242 pm_runtime_set_suspended(&pdev->dev);
3243 }
3244 pm_runtime_put_noidle(&pdev->dev);
3245
3216 /* restore original MAC address */ 3246 /* restore original MAC address */
3217 rtl_rar_set(tp, dev->perm_addr); 3247 rtl_rar_set(tp, dev->perm_addr);
3218 3248
@@ -3239,6 +3269,7 @@ static int rtl8169_open(struct net_device *dev)
3239 struct pci_dev *pdev = tp->pci_dev; 3269 struct pci_dev *pdev = tp->pci_dev;
3240 int retval = -ENOMEM; 3270 int retval = -ENOMEM;
3241 3271
3272 pm_runtime_get_sync(&pdev->dev);
3242 3273
3243 /* 3274 /*
3244 * Note that we use a magic value here, it's weird I know 3275
@@ -3259,7 +3290,7 @@ static int rtl8169_open(struct net_device *dev)
3259 tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES, 3290 tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
3260 &tp->TxPhyAddr); 3291 &tp->TxPhyAddr);
3261 if (!tp->TxDescArray) 3292 if (!tp->TxDescArray)
3262 goto out; 3293 goto err_pm_runtime_put;
3263 3294
3264 tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES, 3295 tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
3265 &tp->RxPhyAddr); 3296 &tp->RxPhyAddr);
@@ -3286,6 +3317,9 @@ static int rtl8169_open(struct net_device *dev)
3286 3317
3287 rtl8169_request_timer(dev); 3318 rtl8169_request_timer(dev);
3288 3319
3320 tp->saved_wolopts = 0;
3321 pm_runtime_put_noidle(&pdev->dev);
3322
3289 rtl8169_check_link_status(dev, tp, tp->mmio_addr); 3323 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
3290out: 3324out:
3291 return retval; 3325 return retval;
@@ -3295,9 +3329,13 @@ err_release_ring_2:
3295err_free_rx_1: 3329err_free_rx_1:
3296 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray, 3330 pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
3297 tp->RxPhyAddr); 3331 tp->RxPhyAddr);
3332 tp->RxDescArray = NULL;
3298err_free_tx_0: 3333err_free_tx_0:
3299 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray, 3334 pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
3300 tp->TxPhyAddr); 3335 tp->TxPhyAddr);
3336 tp->TxDescArray = NULL;
3337err_pm_runtime_put:
3338 pm_runtime_put_noidle(&pdev->dev);
3301 goto out; 3339 goto out;
3302} 3340}
3303 3341
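
Two things happen in this open() hunk: the descriptor-ring pointers are reset to NULL on the error path, so the new runtime-PM callbacks (which use tp->TxDescArray as an "interface is up" marker) never touch freed memory, and every exit now balances the pm_runtime_get_sync() taken at entry. A condensed sketch of that reference discipline, with a hypothetical private struct and names:

#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

/* Hypothetical private data; tx_ring doubles as the "is up" marker
 * that the runtime-PM callbacks test, as tp->TxDescArray does above. */
struct example_priv {
	struct device *dev;
	void *tx_ring;
};

static int example_open(struct net_device *ndev)
{
	struct example_priv *priv = netdev_priv(ndev);
	int ret = 0;

	pm_runtime_get_sync(priv->dev);		/* device awake during setup */

	priv->tx_ring = kzalloc(4096, GFP_KERNEL);
	if (!priv->tx_ring)
		ret = -ENOMEM;			/* marker stays NULL: "down" */

	pm_runtime_put_noidle(priv->dev);	/* balance the get on all paths */
	return ret;
}
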
@@ -4441,12 +4479,20 @@ out:
4441 return done; 4479 return done;
4442} 4480}
4443 4481
4482/*
4483 * Warning: rtl8169_rx_interrupt() might be called:
4484 * 1) from NAPI (softirq) context
4485 * (polling = 1: we should call netif_receive_skb())
4486 * 2) from process context (rtl8169_reset_task())
4487 * (polling = 0: we must call netif_rx() instead)
4488 */
4444static int rtl8169_rx_interrupt(struct net_device *dev, 4489static int rtl8169_rx_interrupt(struct net_device *dev,
4445 struct rtl8169_private *tp, 4490 struct rtl8169_private *tp,
4446 void __iomem *ioaddr, u32 budget) 4491 void __iomem *ioaddr, u32 budget)
4447{ 4492{
4448 unsigned int cur_rx, rx_left; 4493 unsigned int cur_rx, rx_left;
4449 unsigned int delta, count; 4494 unsigned int delta, count;
4495 int polling = (budget != ~(u32)0) ? 1 : 0;
4450 4496
4451 cur_rx = tp->cur_rx; 4497 cur_rx = tp->cur_rx;
4452 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; 4498 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
@@ -4508,8 +4554,12 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
4508 skb_put(skb, pkt_size); 4554 skb_put(skb, pkt_size);
4509 skb->protocol = eth_type_trans(skb, dev); 4555 skb->protocol = eth_type_trans(skb, dev);
4510 4556
4511 if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0) 4557 if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
4512 netif_receive_skb(skb); 4558 if (likely(polling))
4559 netif_receive_skb(skb);
4560 else
4561 netif_rx(skb);
4562 }
4513 4563
4514 dev->stats.rx_bytes += pkt_size; 4564 dev->stats.rx_bytes += pkt_size;
4515 dev->stats.rx_packets++; 4565 dev->stats.rx_packets++;
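
rtl8169_rx_interrupt() now receives a polling flag derived from the budget (NAPI passes a real budget, the reset task passes ~(u32)0), and the flag is threaded into the VLAN receive path as well. The distinction matters because only softirq context may hand packets straight to the stack; a condensed sketch of the delivery choice:

#include <linux/netdevice.h>

/* Hand a received skb to the stack in a context-appropriate way. */
static void example_deliver(struct sk_buff *skb, int polling)
{
	if (likely(polling))
		netif_receive_skb(skb);	/* NAPI poll: softirq context */
	else
		netif_rx(skb);		/* process context: queue to backlog */
}
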
@@ -4704,6 +4754,8 @@ static int rtl8169_close(struct net_device *dev)
4704 struct rtl8169_private *tp = netdev_priv(dev); 4754 struct rtl8169_private *tp = netdev_priv(dev);
4705 struct pci_dev *pdev = tp->pci_dev; 4755 struct pci_dev *pdev = tp->pci_dev;
4706 4756
4757 pm_runtime_get_sync(&pdev->dev);
4758
4707 /* update counters before going down */ 4759 /* update counters before going down */
4708 rtl8169_update_counters(dev); 4760 rtl8169_update_counters(dev);
4709 4761
@@ -4718,6 +4770,8 @@ static int rtl8169_close(struct net_device *dev)
4718 tp->TxDescArray = NULL; 4770 tp->TxDescArray = NULL;
4719 tp->RxDescArray = NULL; 4771 tp->RxDescArray = NULL;
4720 4772
4773 pm_runtime_put_sync(&pdev->dev);
4774
4721 return 0; 4775 return 0;
4722} 4776}
4723 4777
@@ -4743,12 +4797,12 @@ static void rtl_set_rx_mode(struct net_device *dev)
4743 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 4797 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4744 mc_filter[1] = mc_filter[0] = 0xffffffff; 4798 mc_filter[1] = mc_filter[0] = 0xffffffff;
4745 } else { 4799 } else {
4746 struct dev_mc_list *mclist; 4800 struct netdev_hw_addr *ha;
4747 4801
4748 rx_mode = AcceptBroadcast | AcceptMyPhys; 4802 rx_mode = AcceptBroadcast | AcceptMyPhys;
4749 mc_filter[1] = mc_filter[0] = 0; 4803 mc_filter[1] = mc_filter[0] = 0;
4750 netdev_for_each_mc_addr(mclist, dev) { 4804 netdev_for_each_mc_addr(ha, dev) {
4751 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 4805 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4752 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 4806 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4753 rx_mode |= AcceptMulticast; 4807 rx_mode |= AcceptMulticast;
4754 } 4808 }
@@ -4816,21 +4870,74 @@ static int rtl8169_suspend(struct device *device)
4816 return 0; 4870 return 0;
4817} 4871}
4818 4872
4873static void __rtl8169_resume(struct net_device *dev)
4874{
4875 netif_device_attach(dev);
4876 rtl8169_schedule_work(dev, rtl8169_reset_task);
4877}
4878
4819static int rtl8169_resume(struct device *device) 4879static int rtl8169_resume(struct device *device)
4820{ 4880{
4821 struct pci_dev *pdev = to_pci_dev(device); 4881 struct pci_dev *pdev = to_pci_dev(device);
4822 struct net_device *dev = pci_get_drvdata(pdev); 4882 struct net_device *dev = pci_get_drvdata(pdev);
4823 4883
4824 if (!netif_running(dev)) 4884 if (netif_running(dev))
4825 goto out; 4885 __rtl8169_resume(dev);
4826 4886
4827 netif_device_attach(dev); 4887 return 0;
4888}
4889
4890static int rtl8169_runtime_suspend(struct device *device)
4891{
4892 struct pci_dev *pdev = to_pci_dev(device);
4893 struct net_device *dev = pci_get_drvdata(pdev);
4894 struct rtl8169_private *tp = netdev_priv(dev);
4895
4896 if (!tp->TxDescArray)
4897 return 0;
4898
4899 spin_lock_irq(&tp->lock);
4900 tp->saved_wolopts = __rtl8169_get_wol(tp);
4901 __rtl8169_set_wol(tp, WAKE_ANY);
4902 spin_unlock_irq(&tp->lock);
4903
4904 rtl8169_net_suspend(dev);
4828 4905
4829 rtl8169_schedule_work(dev, rtl8169_reset_task);
4830out:
4831 return 0; 4906 return 0;
4832} 4907}
4833 4908
4909static int rtl8169_runtime_resume(struct device *device)
4910{
4911 struct pci_dev *pdev = to_pci_dev(device);
4912 struct net_device *dev = pci_get_drvdata(pdev);
4913 struct rtl8169_private *tp = netdev_priv(dev);
4914
4915 if (!tp->TxDescArray)
4916 return 0;
4917
4918 spin_lock_irq(&tp->lock);
4919 __rtl8169_set_wol(tp, tp->saved_wolopts);
4920 tp->saved_wolopts = 0;
4921 spin_unlock_irq(&tp->lock);
4922
4923 __rtl8169_resume(dev);
4924
4925 return 0;
4926}
4927
4928static int rtl8169_runtime_idle(struct device *device)
4929{
4930 struct pci_dev *pdev = to_pci_dev(device);
4931 struct net_device *dev = pci_get_drvdata(pdev);
4932 struct rtl8169_private *tp = netdev_priv(dev);
4933
4934 if (!tp->TxDescArray)
4935 return 0;
4936
4937 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
4938 return -EBUSY;
4939}
4940
4834static const struct dev_pm_ops rtl8169_pm_ops = { 4941static const struct dev_pm_ops rtl8169_pm_ops = {
4835 .suspend = rtl8169_suspend, 4942 .suspend = rtl8169_suspend,
4836 .resume = rtl8169_resume, 4943 .resume = rtl8169_resume,
@@ -4838,6 +4945,9 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
4838 .thaw = rtl8169_resume, 4945 .thaw = rtl8169_resume,
4839 .poweroff = rtl8169_suspend, 4946 .poweroff = rtl8169_suspend,
4840 .restore = rtl8169_resume, 4947 .restore = rtl8169_resume,
4948 .runtime_suspend = rtl8169_runtime_suspend,
4949 .runtime_resume = rtl8169_runtime_resume,
4950 .runtime_idle = rtl8169_runtime_idle,
4841}; 4951};
4842 4952
4843#define RTL8169_PM_OPS (&rtl8169_pm_ops) 4953#define RTL8169_PM_OPS (&rtl8169_pm_ops)
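
With the three runtime callbacks wired into the same dev_pm_ops as the system-sleep hooks, the PM core can now power the NIC down between uses; note that runtime_idle returning -EBUSY vetoes an immediate suspend, leaving the link-down path's pm_schedule_suspend() to decide later. A minimal sketch of the wiring, with hypothetical callback names and stub bodies:

#include <linux/device.h>
#include <linux/pm.h>

static int example_runtime_suspend(struct device *dev) { return 0; }
static int example_runtime_resume(struct device *dev)  { return 0; }

/* Returning -EBUSY from runtime_idle blocks an immediate suspend;
 * the driver schedules one itself when it sees the link go down. */
static int example_runtime_idle(struct device *dev)    { return -EBUSY; }

static const struct dev_pm_ops example_pm_ops = {
	.runtime_suspend = example_runtime_suspend,
	.runtime_resume	 = example_runtime_resume,
	.runtime_idle	 = example_runtime_idle,
};
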
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 92ae8d3de39b..f155928bf14e 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -2400,7 +2400,7 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
2400 return NULL; 2400 return NULL;
2401 } 2401 }
2402 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer, 2402 pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
2403 skb->len - skb->data_len, PCI_DMA_TODEVICE); 2403 skb_headlen(skb), PCI_DMA_TODEVICE);
2404 frg_cnt = skb_shinfo(skb)->nr_frags; 2404 frg_cnt = skb_shinfo(skb)->nr_frags;
2405 if (frg_cnt) { 2405 if (frg_cnt) {
2406 txds++; 2406 txds++;
@@ -4202,7 +4202,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
4202 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag); 4202 txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
4203 } 4203 }
4204 4204
4205 frg_len = skb->len - skb->data_len; 4205 frg_len = skb_headlen(skb);
4206 if (offload_type == SKB_GSO_UDP) { 4206 if (offload_type == SKB_GSO_UDP) {
4207 int ufo_size; 4207 int ufo_size;
4208 4208
@@ -4965,7 +4965,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev)
4965static void s2io_set_multicast(struct net_device *dev) 4965static void s2io_set_multicast(struct net_device *dev)
4966{ 4966{
4967 int i, j, prev_cnt; 4967 int i, j, prev_cnt;
4968 struct dev_mc_list *mclist; 4968 struct netdev_hw_addr *ha;
4969 struct s2io_nic *sp = netdev_priv(dev); 4969 struct s2io_nic *sp = netdev_priv(dev);
4970 struct XENA_dev_config __iomem *bar0 = sp->bar0; 4970 struct XENA_dev_config __iomem *bar0 = sp->bar0;
4971 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask = 4971 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
@@ -5094,12 +5094,12 @@ static void s2io_set_multicast(struct net_device *dev)
5094 5094
5095 /* Create the new Rx filter list and update the same in H/W. */ 5095 /* Create the new Rx filter list and update the same in H/W. */
5096 i = 0; 5096 i = 0;
5097 netdev_for_each_mc_addr(mclist, dev) { 5097 netdev_for_each_mc_addr(ha, dev) {
5098 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr, 5098 memcpy(sp->usr_addrs[i].addr, ha->addr,
5099 ETH_ALEN); 5099 ETH_ALEN);
5100 mac_addr = 0; 5100 mac_addr = 0;
5101 for (j = 0; j < ETH_ALEN; j++) { 5101 for (j = 0; j < ETH_ALEN; j++) {
5102 mac_addr |= mclist->dmi_addr[j]; 5102 mac_addr |= ha->addr[j];
5103 mac_addr <<= 8; 5103 mac_addr <<= 8;
5104 } 5104 }
5105 mac_addr >>= 8; 5105 mac_addr >>= 8;
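
The two s2io changes at the top of this section substitute skb_headlen(skb) for the open-coded skb->len - skb->data_len; since skb->data_len counts only the paged fragments, the difference is by definition the length of the linear head. A one-line sanity check of the equivalence:

#include <linux/skbuff.h>

/* skb_headlen() is defined as skb->len - skb->data_len. */
static bool headlen_is_linear_len(const struct sk_buff *skb)
{
	return skb_headlen(skb) == skb->len - skb->data_len;
}
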
diff --git a/drivers/net/s6gmac.c b/drivers/net/s6gmac.c
index 45f26344b368..6b12524ad7c1 100644
--- a/drivers/net/s6gmac.c
+++ b/drivers/net/s6gmac.c
@@ -396,7 +396,6 @@ static void s6gmac_rx_interrupt(struct net_device *dev)
396 } else { 396 } else {
397 skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN) 397 skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
398 & S6_GMAC_BURST_POSTRD_LEN_MASK); 398 & S6_GMAC_BURST_POSTRD_LEN_MASK);
399 skb->dev = dev;
400 skb->protocol = eth_type_trans(skb, dev); 399 skb->protocol = eth_type_trans(skb, dev);
401 skb->ip_summed = CHECKSUM_UNNECESSARY; 400 skb->ip_summed = CHECKSUM_UNNECESSARY;
402 netif_rx(skb); 401 netif_rx(skb);
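
The deleted assignment was redundant: eth_type_trans() stores the receiving device in skb->dev itself while working out the protocol. A condensed sketch of the resulting receive tail, with a hypothetical function name:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_rx_finish(struct sk_buff *skb, struct net_device *dev)
{
	/* eth_type_trans() sets skb->dev = dev as a side effect,
	 * so no explicit assignment is needed beforehand. */
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* checksum done in hardware */
	netif_rx(skb);
}
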
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 9944e5d662c0..332031747a23 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2112,7 +2112,7 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
2112 uint64_t reg; 2112 uint64_t reg;
2113 void __iomem *port; 2113 void __iomem *port;
2114 int idx; 2114 int idx;
2115 struct dev_mc_list *mclist; 2115 struct netdev_hw_addr *ha;
2116 struct net_device *dev = sc->sbm_dev; 2116 struct net_device *dev = sc->sbm_dev;
2117 2117
2118 /* 2118 /*
@@ -2161,10 +2161,10 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
2161 * XXX if the table overflows */ 2161 * XXX if the table overflows */
2162 2162
2163 idx = 1; /* skip station address */ 2163 idx = 1; /* skip station address */
2164 netdev_for_each_mc_addr(mclist, dev) { 2164 netdev_for_each_mc_addr(ha, dev) {
2165 if (idx == MAC_ADDR_COUNT) 2165 if (idx == MAC_ADDR_COUNT)
2166 break; 2166 break;
2167 reg = sbmac_addr2reg(mclist->dmi_addr); 2167 reg = sbmac_addr2reg(ha->addr);
2168 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t)); 2168 port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
2169 __raw_writeq(reg, port); 2169 __raw_writeq(reg, port);
2170 idx++; 2170 idx++;
@@ -2664,7 +2664,6 @@ static int sbmac_close(struct net_device *dev)
2664static int sbmac_poll(struct napi_struct *napi, int budget) 2664static int sbmac_poll(struct napi_struct *napi, int budget)
2665{ 2665{
2666 struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi); 2666 struct sbmac_softc *sc = container_of(napi, struct sbmac_softc, napi);
2667 struct net_device *dev = sc->sbm_dev;
2668 int work_done; 2667 int work_done;
2669 2668
2670 work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1); 2669 work_done = sbdma_rx_process(sc, &(sc->sbm_rxdma), budget, 1);
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index d87c4787fffa..1b3260588933 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -433,13 +433,13 @@ static void _sc92031_set_mar(struct net_device *dev)
433 (dev->flags & IFF_ALLMULTI)) 433 (dev->flags & IFF_ALLMULTI))
434 mar0 = mar1 = 0xffffffff; 434 mar0 = mar1 = 0xffffffff;
435 else if (dev->flags & IFF_MULTICAST) { 435 else if (dev->flags & IFF_MULTICAST) {
436 struct dev_mc_list *mc_list; 436 struct netdev_hw_addr *ha;
437 437
438 netdev_for_each_mc_addr(mc_list, dev) { 438 netdev_for_each_mc_addr(ha, dev) {
439 u32 crc; 439 u32 crc;
440 unsigned bit = 0; 440 unsigned bit = 0;
441 441
442 crc = ~ether_crc(ETH_ALEN, mc_list->dmi_addr); 442 crc = ~ether_crc(ETH_ALEN, ha->addr);
443 crc >>= 24; 443 crc >>= 24;
444 444
445 if (crc & 0x01) bit |= 0x02; 445 if (crc & 0x01) bit |= 0x02;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 6486657c47b8..1ad61b7bba40 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1603,7 +1603,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
1603static void efx_set_multicast_list(struct net_device *net_dev) 1603static void efx_set_multicast_list(struct net_device *net_dev)
1604{ 1604{
1605 struct efx_nic *efx = netdev_priv(net_dev); 1605 struct efx_nic *efx = netdev_priv(net_dev);
1606 struct dev_mc_list *mc_list; 1606 struct netdev_hw_addr *ha;
1607 union efx_multicast_hash *mc_hash = &efx->multicast_hash; 1607 union efx_multicast_hash *mc_hash = &efx->multicast_hash;
1608 u32 crc; 1608 u32 crc;
1609 int bit; 1609 int bit;
@@ -1615,8 +1615,8 @@ static void efx_set_multicast_list(struct net_device *net_dev)
1615 memset(mc_hash, 0xff, sizeof(*mc_hash)); 1615 memset(mc_hash, 0xff, sizeof(*mc_hash));
1616 } else { 1616 } else {
1617 memset(mc_hash, 0x00, sizeof(*mc_hash)); 1617 memset(mc_hash, 0x00, sizeof(*mc_hash));
1618 netdev_for_each_mc_addr(mc_list, net_dev) { 1618 netdev_for_each_mc_addr(ha, net_dev) {
1619 crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr); 1619 crc = ether_crc_le(ETH_ALEN, ha->addr);
1620 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); 1620 bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
1621 set_bit_le(bit, mc_hash->byte); 1621 set_bit_le(bit, mc_hash->byte);
1622 } 1622 }
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index b30ce752bbf3..a5d6a6bd0c1a 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -849,13 +849,13 @@ static void sis190_set_rx_mode(struct net_device *dev)
849 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; 849 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
850 mc_filter[1] = mc_filter[0] = 0xffffffff; 850 mc_filter[1] = mc_filter[0] = 0xffffffff;
851 } else { 851 } else {
852 struct dev_mc_list *mclist; 852 struct netdev_hw_addr *ha;
853 853
854 rx_mode = AcceptBroadcast | AcceptMyPhys; 854 rx_mode = AcceptBroadcast | AcceptMyPhys;
855 mc_filter[1] = mc_filter[0] = 0; 855 mc_filter[1] = mc_filter[0] = 0;
856 netdev_for_each_mc_addr(mclist, dev) { 856 netdev_for_each_mc_addr(ha, dev) {
857 int bit_nr = 857 int bit_nr =
858 ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f; 858 ether_crc(ETH_ALEN, ha->addr) & 0x3f;
859 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 859 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
860 rx_mode |= AcceptMulticast; 860 rx_mode |= AcceptMulticast;
861 } 861 }
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index cc0c731c4f09..6293592635be 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1499,7 +1499,7 @@ static void sis900_read_mode(struct net_device *net_dev, int *speed, int *duplex
1499 } 1499 }
1500 1500
1501 if(netif_msg_link(sis_priv)) 1501 if(netif_msg_link(sis_priv))
1502 printk(KERN_INFO "%s: Media Link On %s %s-duplex \n", 1502 printk(KERN_INFO "%s: Media Link On %s %s-duplex\n",
1503 net_dev->name, 1503 net_dev->name,
1504 *speed == HW_SPEED_100_MBPS ? 1504 *speed == HW_SPEED_100_MBPS ?
1505 "100mbps" : "10mbps", 1505 "100mbps" : "10mbps",
@@ -1523,7 +1523,7 @@ static void sis900_tx_timeout(struct net_device *net_dev)
1523 int i; 1523 int i;
1524 1524
1525 if(netif_msg_tx_err(sis_priv)) 1525 if(netif_msg_tx_err(sis_priv))
1526 printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x \n", 1526 printk(KERN_INFO "%s: Transmit timeout, status %8.8x %8.8x\n",
1527 net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr)); 1527 net_dev->name, inl(ioaddr + cr), inl(ioaddr + isr));
1528 1528
1529 /* Disable interrupts by clearing the interrupt mask. */ 1529 /* Disable interrupts by clearing the interrupt mask. */
@@ -2298,12 +2298,14 @@ static void set_rx_mode(struct net_device *net_dev)
2298 /* Accept Broadcast packet, destination address matches our 2298 /* Accept Broadcast packet, destination address matches our
2299 * MAC address, use Receive Filter to reject unwanted MCAST 2299 * MAC address, use Receive Filter to reject unwanted MCAST
2300 * packets */ 2300 * packets */
2301 struct dev_mc_list *mclist; 2301 struct netdev_hw_addr *ha;
2302 rx_mode = RFAAB; 2302 rx_mode = RFAAB;
2303 2303
2304 netdev_for_each_mc_addr(mclist, net_dev) { 2304 netdev_for_each_mc_addr(ha, net_dev) {
2305 unsigned int bit_nr = 2305 unsigned int bit_nr;
2306 sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev); 2306
2307 bit_nr = sis900_mcast_bitnr(ha->addr,
2308 sis_priv->chipset_rev);
2307 mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf)); 2309 mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
2308 } 2310 }
2309 } 2311 }
diff --git a/drivers/net/skfp/fplustm.c b/drivers/net/skfp/fplustm.c
index 6028bbb3b28a..9d8d1ac48176 100644
--- a/drivers/net/skfp/fplustm.c
+++ b/drivers/net/skfp/fplustm.c
@@ -1352,7 +1352,7 @@ void rtm_set_timer(struct s_smc *smc)
1352 /* 1352 /*
1353 * MIB timer and hardware timer have the same resolution of 80nS 1353 * MIB timer and hardware timer have the same resolution of 80nS
1354 */ 1354 */
1355 DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns \n", 1355 DB_RMT("RMT: setting new fddiPATHT_Rmode, t = %d ns\n",
1356 (int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ; 1356 (int) smc->mib.a[PATH0].fddiPATHT_Rmode,0) ;
1357 outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ; 1357 outpd(ADDR(B2_RTM_INI),smc->mib.a[PATH0].fddiPATHT_Rmode) ;
1358} 1358}
diff --git a/drivers/net/skfp/pcmplc.c b/drivers/net/skfp/pcmplc.c
index e6b33ee05ede..ba45bc794d77 100644
--- a/drivers/net/skfp/pcmplc.c
+++ b/drivers/net/skfp/pcmplc.c
@@ -1277,7 +1277,7 @@ static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
1277 1277
1278 mib = phy->mib ; 1278 mib = phy->mib ;
1279 1279
1280 DB_PCMN(1,"SIG rec %x %x: \n", bit,phy->r_val[bit] ) ; 1280 DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ;
1281 bit++ ; 1281 bit++ ;
1282 1282
1283 switch(bit) { 1283 switch(bit) {
@@ -1580,7 +1580,7 @@ static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy
1580 mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ; 1580 mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
1581 break ; 1581 break ;
1582 } 1582 }
1583 DB_PCMN(1,"SIG snd %x %x: \n", bit,phy->t_val[bit] ) ; 1583 DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ;
1584} 1584}
1585 1585
1586/* 1586/*
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index d9016b75abc2..7912606b0bc7 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -852,7 +852,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
852static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev) 852static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
853{ 853{
854 struct s_smc *smc = netdev_priv(dev); 854 struct s_smc *smc = netdev_priv(dev);
855 struct dev_mc_list *dmi; 855 struct netdev_hw_addr *ha;
856 856
857 /* Enable promiscuous mode, if necessary */ 857 /* Enable promiscuous mode, if necessary */
858 if (dev->flags & IFF_PROMISC) { 858 if (dev->flags & IFF_PROMISC) {
@@ -876,13 +876,13 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
876 /* use exact filtering */ 876 /* use exact filtering */
877 877
878 // point to first multicast addr 878 // point to first multicast addr
879 netdev_for_each_mc_addr(dmi, dev) { 879 netdev_for_each_mc_addr(ha, dev) {
880 mac_add_multicast(smc, 880 mac_add_multicast(smc,
881 (struct fddi_addr *)dmi->dmi_addr, 881 (struct fddi_addr *)ha->addr,
882 1); 882 1);
883 883
884 pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n", 884 pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
885 dmi->dmi_addr); 885 ha->addr);
886 } 886 }
887 887
888 } else { // more MC addresses than HW supports 888 } else { // more MC addresses than HW supports
diff --git a/drivers/net/skfp/srf.c b/drivers/net/skfp/srf.c
index 6caf713b744c..40882b3faba6 100644
--- a/drivers/net/skfp/srf.c
+++ b/drivers/net/skfp/srf.c
@@ -414,7 +414,7 @@ static void smt_send_srf(struct s_smc *smc)
414 smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ; 414 smt->smt_len = SMT_MAX_INFO_LEN - pcon.pc_len ;
415 mb->sm_len = smt->smt_len + sizeof(struct smt_header) ; 415 mb->sm_len = smt->smt_len + sizeof(struct smt_header) ;
416 416
417 DB_SMT("SRF: sending SRF at %x, len %d \n",smt,mb->sm_len) ; 417 DB_SMT("SRF: sending SRF at %x, len %d\n",smt,mb->sm_len) ;
418 DB_SMT("SRF: state SR%d Threshold %d\n", 418 DB_SMT("SRF: state SR%d Threshold %d\n",
419 smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ; 419 smc->srf.sr_state,smc->srf.SRThreshold/TICKS_PER_SECOND) ;
420#ifdef DEBUG 420#ifdef DEBUG
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 50eb70609f20..96eee8666877 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -2918,7 +2918,7 @@ static void genesis_set_multicast(struct net_device *dev)
2918 struct skge_port *skge = netdev_priv(dev); 2918 struct skge_port *skge = netdev_priv(dev);
2919 struct skge_hw *hw = skge->hw; 2919 struct skge_hw *hw = skge->hw;
2920 int port = skge->port; 2920 int port = skge->port;
2921 struct dev_mc_list *list; 2921 struct netdev_hw_addr *ha;
2922 u32 mode; 2922 u32 mode;
2923 u8 filter[8]; 2923 u8 filter[8];
2924 2924
@@ -2938,8 +2938,8 @@ static void genesis_set_multicast(struct net_device *dev)
2938 skge->flow_status == FLOW_STAT_SYMMETRIC) 2938 skge->flow_status == FLOW_STAT_SYMMETRIC)
2939 genesis_add_filter(filter, pause_mc_addr); 2939 genesis_add_filter(filter, pause_mc_addr);
2940 2940
2941 netdev_for_each_mc_addr(list, dev) 2941 netdev_for_each_mc_addr(ha, dev)
2942 genesis_add_filter(filter, list->dmi_addr); 2942 genesis_add_filter(filter, ha->addr);
2943 } 2943 }
2944 2944
2945 xm_write32(hw, port, XM_MODE, mode); 2945 xm_write32(hw, port, XM_MODE, mode);
@@ -2957,7 +2957,7 @@ static void yukon_set_multicast(struct net_device *dev)
2957 struct skge_port *skge = netdev_priv(dev); 2957 struct skge_port *skge = netdev_priv(dev);
2958 struct skge_hw *hw = skge->hw; 2958 struct skge_hw *hw = skge->hw;
2959 int port = skge->port; 2959 int port = skge->port;
2960 struct dev_mc_list *list; 2960 struct netdev_hw_addr *ha;
2961 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND || 2961 int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
2962 skge->flow_status == FLOW_STAT_SYMMETRIC); 2962 skge->flow_status == FLOW_STAT_SYMMETRIC);
2963 u16 reg; 2963 u16 reg;
@@ -2980,8 +2980,8 @@ static void yukon_set_multicast(struct net_device *dev)
2980 if (rx_pause) 2980 if (rx_pause)
2981 yukon_add_filter(filter, pause_mc_addr); 2981 yukon_add_filter(filter, pause_mc_addr);
2982 2982
2983 netdev_for_each_mc_addr(list, dev) 2983 netdev_for_each_mc_addr(ha, dev)
2984 yukon_add_filter(filter, list->dmi_addr); 2984 yukon_add_filter(filter, ha->addr);
2985 } 2985 }
2986 2986
2987 2987
@@ -3667,7 +3667,7 @@ static int skge_debug_show(struct seq_file *seq, void *v)
3667 t->csum_offs, t->csum_write, t->csum_start); 3667 t->csum_offs, t->csum_write, t->csum_start);
3668 } 3668 }
3669 3669
3670 seq_printf(seq, "\nRx Ring: \n"); 3670 seq_printf(seq, "\nRx Ring:\n");
3671 for (e = skge->rx_ring.to_clean; ; e = e->next) { 3671 for (e = skge->rx_ring.to_clean; ; e = e->next) {
3672 const struct skge_rx_desc *r = e->desc; 3672 const struct skge_rx_desc *r = e->desc;
3673 3673
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 088c797eb73b..3a086d3a7cbf 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -227,7 +227,7 @@ static void sky2_power_on(struct sky2_hw *hw)
227 /* disable Core Clock Division, */ 227 /* disable Core Clock Division, */
228 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS); 228 sky2_write32(hw, B2_Y2_CLK_CTRL, Y2_CLK_DIV_DIS);
229 229
230 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 230 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
231 /* enable bits are inverted */ 231 /* enable bits are inverted */
232 sky2_write8(hw, B2_Y2_CLK_GATE, 232 sky2_write8(hw, B2_Y2_CLK_GATE,
233 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS | 233 Y2_PCI_CLK_LNK1_DIS | Y2_COR_CLK_LNK1_DIS |
@@ -269,7 +269,7 @@ static void sky2_power_on(struct sky2_hw *hw)
269 269
270static void sky2_power_aux(struct sky2_hw *hw) 270static void sky2_power_aux(struct sky2_hw *hw)
271{ 271{
272 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 272 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
273 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 273 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
274 else 274 else
275 /* enable bits are inverted */ 275 /* enable bits are inverted */
@@ -652,7 +652,7 @@ static void sky2_phy_power_up(struct sky2_hw *hw, unsigned port)
652 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1); 652 reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
653 reg1 &= ~phy_power[port]; 653 reg1 &= ~phy_power[port];
654 654
655 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > 1) 655 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev > CHIP_REV_YU_XL_A1)
656 reg1 |= coma_mode[port]; 656 reg1 |= coma_mode[port];
657 657
658 sky2_pci_write32(hw, PCI_DEV_REG1, reg1); 658 sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
@@ -824,7 +824,9 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
824 824
825 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR); 825 sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_RST_CLR);
826 826
827 if (hw->chip_id == CHIP_ID_YUKON_XL && hw->chip_rev == 0 && port == 1) { 827 if (hw->chip_id == CHIP_ID_YUKON_XL &&
828 hw->chip_rev == CHIP_REV_YU_XL_A0 &&
829 port == 1) {
828 /* WA DEV_472 -- looks like crossed wires on port 2 */ 830 /* WA DEV_472 -- looks like crossed wires on port 2 */
829 /* clear GMAC 1 Control reset */ 831 /* clear GMAC 1 Control reset */
830 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR); 832 sky2_write8(hw, SK_REG(0, GMAC_CTRL), GMC_RST_CLR);
@@ -878,6 +880,10 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
878 if (hw->dev[port]->mtu > ETH_DATA_LEN) 880 if (hw->dev[port]->mtu > ETH_DATA_LEN)
879 reg |= GM_SMOD_JUMBO_ENA; 881 reg |= GM_SMOD_JUMBO_ENA;
880 882
883 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
884 hw->chip_rev == CHIP_REV_YU_EC_U_B1)
885 reg |= GM_NEW_FLOW_CTRL;
886
881 gma_write16(hw, port, GM_SERIAL_MODE, reg); 887 gma_write16(hw, port, GM_SERIAL_MODE, reg);
882 888
883 /* virtual address for data */ 889 /* virtual address for data */
@@ -1414,8 +1420,7 @@ static void sky2_rx_start(struct sky2_port *sky2)
1414 /* These chips have no ram buffer? 1420 /* These chips have no ram buffer?
1415 * MAC Rx RAM Read is controlled by hardware */ 1421 * MAC Rx RAM Read is controlled by hardware */
1416 if (hw->chip_id == CHIP_ID_YUKON_EC_U && 1422 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
1417 (hw->chip_rev == CHIP_REV_YU_EC_U_A1 || 1423 hw->chip_rev > CHIP_REV_YU_EC_U_A0)
1418 hw->chip_rev == CHIP_REV_YU_EC_U_B0))
1419 sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS); 1424 sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
1420 1425
1421 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); 1426 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
@@ -2142,7 +2147,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
2142 istatus, phystat); 2147 istatus, phystat);
2143 2148
2144 if (istatus & PHY_M_IS_AN_COMPL) { 2149 if (istatus & PHY_M_IS_AN_COMPL) {
2145 if (sky2_autoneg_done(sky2, phystat) == 0) 2150 if (sky2_autoneg_done(sky2, phystat) == 0 &&
2151 !netif_carrier_ok(dev))
2146 sky2_link_up(sky2); 2152 sky2_link_up(sky2);
2147 goto out; 2153 goto out;
2148 } 2154 }
@@ -3622,7 +3628,7 @@ static void sky2_set_multicast(struct net_device *dev)
3622 struct sky2_port *sky2 = netdev_priv(dev); 3628 struct sky2_port *sky2 = netdev_priv(dev);
3623 struct sky2_hw *hw = sky2->hw; 3629 struct sky2_hw *hw = sky2->hw;
3624 unsigned port = sky2->port; 3630 unsigned port = sky2->port;
3625 struct dev_mc_list *list; 3631 struct netdev_hw_addr *ha;
3626 u16 reg; 3632 u16 reg;
3627 u8 filter[8]; 3633 u8 filter[8];
3628 int rx_pause; 3634 int rx_pause;
@@ -3646,8 +3652,8 @@ static void sky2_set_multicast(struct net_device *dev)
3646 if (rx_pause) 3652 if (rx_pause)
3647 sky2_add_filter(filter, pause_mc_addr); 3653 sky2_add_filter(filter, pause_mc_addr);
3648 3654
3649 netdev_for_each_mc_addr(list, dev) 3655 netdev_for_each_mc_addr(ha, dev)
3650 sky2_add_filter(filter, list->dmi_addr); 3656 sky2_add_filter(filter, ha->addr);
3651 } 3657 }
3652 3658
3653 gma_write16(hw, port, GM_MC_ADDR_H1, 3659 gma_write16(hw, port, GM_MC_ADDR_H1,
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index a5e182dd9819..0bebfb3638f6 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -548,6 +548,14 @@ enum {
548 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */ 548 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
549 CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */ 549 CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
550}; 550};
551
552enum yukon_xl_rev {
553 CHIP_REV_YU_XL_A0 = 0,
554 CHIP_REV_YU_XL_A1 = 1,
555 CHIP_REV_YU_XL_A2 = 2,
556 CHIP_REV_YU_XL_A3 = 3,
557};
558
551enum yukon_ec_rev { 559enum yukon_ec_rev {
552 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ 560 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
553 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */ 561 CHIP_REV_YU_EC_A2 = 1, /* Chip Rev. for Yukon-EC A2 */
@@ -557,6 +565,7 @@ enum yukon_ec_u_rev {
557 CHIP_REV_YU_EC_U_A0 = 1, 565 CHIP_REV_YU_EC_U_A0 = 1,
558 CHIP_REV_YU_EC_U_A1 = 2, 566 CHIP_REV_YU_EC_U_A1 = 2,
559 CHIP_REV_YU_EC_U_B0 = 3, 567 CHIP_REV_YU_EC_U_B0 = 3,
568 CHIP_REV_YU_EC_U_B1 = 5,
560}; 569};
561enum yukon_fe_rev { 570enum yukon_fe_rev {
562 CHIP_REV_YU_FE_A1 = 1, 571 CHIP_REV_YU_FE_A1 = 1,
@@ -1775,10 +1784,13 @@ enum {
1775/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */ 1784/* GM_SERIAL_MODE 16 bit r/w Serial Mode Register */
1776enum { 1785enum {
1777 GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */ 1786 GM_SMOD_DATABL_MSK = 0x1f<<11, /* Bit 15..11: Data Blinder (r/o) */
1778 GM_SMOD_LIMIT_4 = 1<<10, /* Bit 10: 4 consecutive Tx trials */ 1787 GM_SMOD_LIMIT_4 = 1<<10, /* 4 consecutive Tx trials */
1779 GM_SMOD_VLAN_ENA = 1<<9, /* Bit 9: Enable VLAN (Max. Frame Len) */ 1788 GM_SMOD_VLAN_ENA = 1<<9, /* Enable VLAN (Max. Frame Len) */
1780 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ 1789 GM_SMOD_JUMBO_ENA = 1<<8, /* Enable Jumbo (Max. Frame Len) */
1781 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ 1790
1791 GM_NEW_FLOW_CTRL = 1<<6, /* Enable New Flow-Control */
1792
1793 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
1782}; 1794};
1783 1795
1784#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) 1796#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
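
The new yukon_xl_rev enum (and the CHIP_REV_YU_EC_U_B1 addition) lets the sky2 hunks above replace magic comparisons like "chip_rev > 1" with named revisions; the Yukon-EC-U B1 entry also gates the new GM_NEW_FLOW_CTRL serial-mode bit. A small sketch of the idiom, assuming sky2.h and a hypothetical helper name:

/* Named revisions make the scope of a hardware quirk explicit. */
static bool example_needs_clkgate_quirk(const struct sky2_hw *hw)
{
	return hw->chip_id == CHIP_ID_YUKON_XL &&
	       hw->chip_rev > CHIP_REV_YU_XL_A1;	/* rev A2 and later */
}
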
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 635820d42b19..1e49fcfa28ab 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -382,7 +382,7 @@ static inline void smc911x_rcv(struct net_device *dev)
382 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", 382 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
383 dev->name, __func__); 383 dev->name, __func__);
384 status = SMC_GET_RX_STS_FIFO(lp); 384 status = SMC_GET_RX_STS_FIFO(lp);
385 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n", 385 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x\n",
386 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); 386 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
387 pkt_len = (status & RX_STS_PKT_LEN_) >> 16; 387 pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
388 if (status & RX_STS_ES_) { 388 if (status & RX_STS_ES_) {
@@ -1135,7 +1135,7 @@ static irqreturn_t smc911x_interrupt(int irq, void *dev_id)
1135 } 1135 }
1136#else 1136#else
1137 if (status & INT_STS_TSFL_) { 1137 if (status & INT_STS_TSFL_) {
1138 DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq \n", dev->name, ); 1138 DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit irq\n", dev->name);
1139 smc911x_tx(dev); 1139 smc911x_tx(dev);
1140 SMC_ACK_INT(lp, INT_STS_TSFL_); 1140 SMC_ACK_INT(lp, INT_STS_TSFL_);
1141 } 1141 }
@@ -1274,7 +1274,7 @@ static void smc911x_timeout(struct net_device *dev)
1274 status = SMC_GET_INT(lp); 1274 status = SMC_GET_INT(lp);
1275 mask = SMC_GET_INT_EN(lp); 1275 mask = SMC_GET_INT_EN(lp);
1276 spin_unlock_irqrestore(&lp->lock, flags); 1276 spin_unlock_irqrestore(&lp->lock, flags);
1277 DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n", 1277 DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x\n",
1278 dev->name, status, mask); 1278 dev->name, status, mask);
1279 1279
1280 /* Dump the current TX FIFO contents and restart */ 1280 /* Dump the current TX FIFO contents and restart */
@@ -1340,7 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1340 * within that register. 1340 * within that register.
1341 */ 1341 */
1342 else if (!netdev_mc_empty(dev)) { 1342 else if (!netdev_mc_empty(dev)) {
1343 struct dev_mc_list *cur_addr; 1343 struct netdev_hw_addr *ha;
1344 1344
1345 /* Set the Hash perfect mode */ 1345 /* Set the Hash perfect mode */
1346 mcr |= MAC_CR_HPFILT_; 1346 mcr |= MAC_CR_HPFILT_;
@@ -1348,19 +1348,16 @@ static void smc911x_set_multicast_list(struct net_device *dev)
1348 /* start with a table of all zeros: reject all */ 1348 /* start with a table of all zeros: reject all */
1349 memset(multicast_table, 0, sizeof(multicast_table)); 1349 memset(multicast_table, 0, sizeof(multicast_table));
1350 1350
1351 netdev_for_each_mc_addr(cur_addr, dev) { 1351 netdev_for_each_mc_addr(ha, dev) {
1352 u32 position; 1352 u32 position;
1353 1353
1354 /* do we have a pointer here? */
1355 if (!cur_addr)
1356 break;
1357 /* make sure this is a multicast address - 1354 /* make sure this is a multicast address -
1358 shouldn't this be a given if we have it here ? */ 1355 shouldn't this be a given if we have it here ? */
1359 if (!(*cur_addr->dmi_addr & 1)) 1356 if (!(*ha->addr & 1))
1360 continue; 1357 continue;
1361 1358
1362 /* upper 6 bits are used as hash index */ 1359 /* upper 6 bits are used as hash index */
1363 position = ether_crc(ETH_ALEN, cur_addr->dmi_addr)>>26; 1360 position = ether_crc(ETH_ALEN, ha->addr)>>26;
1364 1361
1365 multicast_table[position>>5] |= 1 << (position&0x1f); 1362 multicast_table[position>>5] |= 1 << (position&0x1f);
1366 } 1363 }
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 3f2f7843aa4e..e94521cf70a1 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -416,7 +416,7 @@ static void smc_shutdown( int ioaddr )
416 416
417 417
418/* 418/*
419 . Function: smc_setmulticast( int ioaddr, int count, dev_mc_list * adds ) 419 . Function: smc_setmulticast( int ioaddr, struct net_device *dev )
420 . Purpose: 420 . Purpose:
421 . This sets the internal hardware table to filter out unwanted multicast 421 . This sets the internal hardware table to filter out unwanted multicast
422 . packets before they take up memory. 422 . packets before they take up memory.
@@ -437,26 +437,23 @@ static void smc_setmulticast(int ioaddr, struct net_device *dev)
437{ 437{
438 int i; 438 int i;
439 unsigned char multicast_table[ 8 ]; 439 unsigned char multicast_table[ 8 ];
440 struct dev_mc_list *cur_addr; 440 struct netdev_hw_addr *ha;
441 /* table for flipping the order of 3 bits */ 441 /* table for flipping the order of 3 bits */
442 unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 }; 442 unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
443 443
444 /* start with a table of all zeros: reject all */ 444 /* start with a table of all zeros: reject all */
445 memset( multicast_table, 0, sizeof( multicast_table ) ); 445 memset( multicast_table, 0, sizeof( multicast_table ) );
446 446
447 netdev_for_each_mc_addr(cur_addr, dev) { 447 netdev_for_each_mc_addr(ha, dev) {
448 int position; 448 int position;
449 449
450 /* do we have a pointer here? */
451 if ( !cur_addr )
452 break;
453 /* make sure this is a multicast address - shouldn't this 450 /* make sure this is a multicast address - shouldn't this
454 be a given if we have it here ? */ 451 be a given if we have it here ? */
455 if ( !( *cur_addr->dmi_addr & 1 ) ) 452 if (!(*ha->addr & 1))
456 continue; 453 continue;
457 454
458 /* only use the low order bits */ 455 /* only use the low order bits */
459 position = ether_crc_le(6, cur_addr->dmi_addr) & 0x3f; 456 position = ether_crc_le(6, ha->addr) & 0x3f;
460 457
461 /* do some messy swapping to put the bit in the right spot */ 458 /* do some messy swapping to put the bit in the right spot */
462 multicast_table[invert3[position&7]] |= 459 multicast_table[invert3[position&7]] |=
@@ -528,7 +525,7 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
528 numPages = ((length & 0xfffe) + 6) / 256; 525 numPages = ((length & 0xfffe) + 6) / 256;
529 526
530 if (numPages > 7 ) { 527 if (numPages > 7 ) {
531 printk(CARDNAME": Far too big packet error. \n"); 528 printk(CARDNAME": Far too big packet error.\n");
532 /* freeing the packet is a good thing here... but should 529 /* freeing the packet is a good thing here... but should
533 . any packets of this size get down here? */ 530 . any packets of this size get down here? */
534 dev_kfree_skb (skb); 531 dev_kfree_skb (skb);
@@ -570,9 +567,9 @@ static netdev_tx_t smc_wait_to_send_packet(struct sk_buff *skb,
570 if ( !time_out ) { 567 if ( !time_out ) {
571 /* oh well, wait until the chip finds memory later */ 568 /* oh well, wait until the chip finds memory later */
572 SMC_ENABLE_INT( IM_ALLOC_INT ); 569 SMC_ENABLE_INT( IM_ALLOC_INT );
573 PRINTK2((CARDNAME": memory allocation deferred. \n")); 570 PRINTK2((CARDNAME": memory allocation deferred.\n"));
574 /* it's deferred, but I'll handle it later */ 571 /* it's deferred, but I'll handle it later */
575 return NETDEV_TX_OK; 572 return NETDEV_TX_OK;
576 } 573 }
577 /* or YES! I can send the packet now.. */ 574 /* or YES! I can send the packet now.. */
578 smc_hardware_send_packet(dev); 575 smc_hardware_send_packet(dev);
@@ -610,7 +607,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
610 ioaddr = dev->base_addr; 607 ioaddr = dev->base_addr;
611 608
612 if ( !skb ) { 609 if ( !skb ) {
613 PRINTK((CARDNAME": In XMIT with no packet to send \n")); 610 PRINTK((CARDNAME": In XMIT with no packet to send\n"));
614 return; 611 return;
615 } 612 }
616 length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN; 613 length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
@@ -620,7 +617,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
620 packet_no = inb( ioaddr + PNR_ARR + 1 ); 617 packet_no = inb( ioaddr + PNR_ARR + 1 );
621 if ( packet_no & 0x80 ) { 618 if ( packet_no & 0x80 ) {
622 /* or isn't there? BAD CHIP! */ 619 /* or isn't there? BAD CHIP! */
623 printk(KERN_DEBUG CARDNAME": Memory allocation failed. \n"); 620 printk(KERN_DEBUG CARDNAME": Memory allocation failed.\n");
624 dev_kfree_skb_any(skb); 621 dev_kfree_skb_any(skb);
625 lp->saved_skb = NULL; 622 lp->saved_skb = NULL;
626 netif_wake_queue(dev); 623 netif_wake_queue(dev);
@@ -685,7 +682,7 @@ static void smc_hardware_send_packet( struct net_device * dev )
685 /* and let the chipset deal with it */ 682 /* and let the chipset deal with it */
686 outw( MC_ENQUEUE , ioaddr + MMU_CMD ); 683 outw( MC_ENQUEUE , ioaddr + MMU_CMD );
687 684
688 PRINTK2((CARDNAME": Sent packet of length %d \n",length)); 685 PRINTK2((CARDNAME": Sent packet of length %d\n", length));
689 686
690 lp->saved_skb = NULL; 687 lp->saved_skb = NULL;
691 dev_kfree_skb_any (skb); 688 dev_kfree_skb_any (skb);
@@ -937,7 +934,7 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
937 if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) { 934 if ( !chip_ids[ ( revision_register >> 4 ) & 0xF ] ) {
938 /* I don't recognize this chip, so... */ 935 /* I don't recognize this chip, so... */
939 printk(CARDNAME ": IO %x: Unrecognized revision register:" 936 printk(CARDNAME ": IO %x: Unrecognized revision register:"
940 " %x, Contact author. \n", ioaddr, revision_register ); 937 " %x, Contact author.\n", ioaddr, revision_register);
941 938
942 retval = -ENODEV; 939 retval = -ENODEV;
943 goto err_out; 940 goto err_out;
@@ -1074,7 +1071,7 @@ static void print_packet( byte * buf, int length )
1074 int remainder; 1071 int remainder;
1075 int lines; 1072 int lines;
1076 1073
1077 printk("Packet of length %d \n", length ); 1074 printk("Packet of length %d\n", length);
1078 lines = length / 16; 1075 lines = length / 16;
1079 remainder = length % 16; 1076 remainder = length % 16;
1080 1077
@@ -1201,7 +1198,7 @@ static void smc_rcv(struct net_device *dev)
1201 1198
1202 if ( packet_number & FP_RXEMPTY ) { 1199 if ( packet_number & FP_RXEMPTY ) {
1203 /* we got called , but nothing was on the FIFO */ 1200 /* we got called , but nothing was on the FIFO */
1204 PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO. \n")); 1201 PRINTK((CARDNAME ": WARNING: smc_rcv with nothing on FIFO.\n"));
1205 /* don't need to restore anything */ 1202 /* don't need to restore anything */
1206 return; 1203 return;
1207 } 1204 }
@@ -1257,14 +1254,14 @@ static void smc_rcv(struct net_device *dev)
1257 to send the DWORDs or the bytes first, or some 1254 to send the DWORDs or the bytes first, or some
1258 mixture. A mixture might improve already slow PIO 1255 mixture. A mixture might improve already slow PIO
1259 performance */ 1256 performance */
1260 PRINTK3((" Reading %d dwords (and %d bytes) \n", 1257 PRINTK3((" Reading %d dwords (and %d bytes)\n",
1261 packet_length >> 2, packet_length & 3 )); 1258 packet_length >> 2, packet_length & 3 ));
1262 insl(ioaddr + DATA_1 , data, packet_length >> 2 ); 1259 insl(ioaddr + DATA_1 , data, packet_length >> 2 );
1263 /* read the left over bytes */ 1260 /* read the left over bytes */
1264 insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC), 1261 insb( ioaddr + DATA_1, data + (packet_length & 0xFFFFFC),
1265 packet_length & 0x3 ); 1262 packet_length & 0x3 );
1266#else 1263#else
1267 PRINTK3((" Reading %d words and %d byte(s) \n", 1264 PRINTK3((" Reading %d words and %d byte(s)\n",
1268 (packet_length >> 1 ), packet_length & 1 )); 1265 (packet_length >> 1 ), packet_length & 1 ));
1269 insw(ioaddr + DATA_1 , data, packet_length >> 1); 1266 insw(ioaddr + DATA_1 , data, packet_length >> 1);
1270 if ( packet_length & 1 ) { 1267 if ( packet_length & 1 ) {
@@ -1333,7 +1330,7 @@ static void smc_tx( struct net_device * dev )
1333 outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER ); 1330 outw( PTR_AUTOINC | PTR_READ, ioaddr + POINTER );
1334 1331
1335 tx_status = inw( ioaddr + DATA_1 ); 1332 tx_status = inw( ioaddr + DATA_1 );
1336 PRINTK3((CARDNAME": TX DONE STATUS: %4x \n", tx_status )); 1333 PRINTK3((CARDNAME": TX DONE STATUS: %4x\n", tx_status));
1337 1334
1338 dev->stats.tx_errors++; 1335 dev->stats.tx_errors++;
1339 if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++; 1336 if ( tx_status & TS_LOSTCAR ) dev->stats.tx_carrier_errors++;
@@ -1347,7 +1344,7 @@ static void smc_tx( struct net_device * dev )
1347#endif 1344#endif
1348 1345
1349 if ( tx_status & TS_SUCCESS ) { 1346 if ( tx_status & TS_SUCCESS ) {
1350 printk(CARDNAME": Successful packet caused interrupt \n"); 1347 printk(CARDNAME": Successful packet caused interrupt\n");
1351 } 1348 }
1352 /* re-enable transmit */ 1349 /* re-enable transmit */
1353 SMC_SELECT_BANK( 0 ); 1350 SMC_SELECT_BANK( 0 );
@@ -1393,7 +1390,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1393 int handled = 0; 1390 int handled = 0;
1394 1391
1395 1392
1396 PRINTK3((CARDNAME": SMC interrupt started \n")); 1393 PRINTK3((CARDNAME": SMC interrupt started\n"));
1397 1394
1398 saved_bank = inw( ioaddr + BANK_SELECT ); 1395 saved_bank = inw( ioaddr + BANK_SELECT );
1399 1396
@@ -1408,7 +1405,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1408 /* set a timeout value, so I don't stay here forever */ 1405 /* set a timeout value, so I don't stay here forever */
1409 timeout = 4; 1406 timeout = 4;
1410 1407
1411 PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x \n", mask )); 1408 PRINTK2((KERN_WARNING CARDNAME ": MASK IS %x\n", mask));
1412 do { 1409 do {
1413 /* read the status flag, and mask it */ 1410 /* read the status flag, and mask it */
1414 status = inb( ioaddr + INTERRUPT ) & mask; 1411 status = inb( ioaddr + INTERRUPT ) & mask;
@@ -1418,7 +1415,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1418 handled = 1; 1415 handled = 1;
1419 1416
1420 PRINTK3((KERN_WARNING CARDNAME 1417 PRINTK3((KERN_WARNING CARDNAME
1421 ": Handling interrupt status %x \n", status )); 1418 ": Handling interrupt status %x\n", status));
1422 1419
1423 if (status & IM_RCV_INT) { 1420 if (status & IM_RCV_INT) {
1424 /* Got a packet(s). */ 1421 /* Got a packet(s). */
@@ -1452,7 +1449,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1452 1449
1453 } else if (status & IM_ALLOC_INT ) { 1450 } else if (status & IM_ALLOC_INT ) {
1454 PRINTK2((KERN_DEBUG CARDNAME 1451 PRINTK2((KERN_DEBUG CARDNAME
1455 ": Allocation interrupt \n")); 1452 ": Allocation interrupt\n"));
1456 /* clear this interrupt so it doesn't happen again */ 1453 /* clear this interrupt so it doesn't happen again */
1457 mask &= ~IM_ALLOC_INT; 1454 mask &= ~IM_ALLOC_INT;
1458 1455
@@ -1470,9 +1467,9 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1470 dev->stats.rx_fifo_errors++; 1467 dev->stats.rx_fifo_errors++;
1471 outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT ); 1468 outb( IM_RX_OVRN_INT, ioaddr + INTERRUPT );
1472 } else if (status & IM_EPH_INT ) { 1469 } else if (status & IM_EPH_INT ) {
1473 PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT \n")); 1470 PRINTK((CARDNAME ": UNSUPPORTED: EPH INTERRUPT\n"));
1474 } else if (status & IM_ERCV_INT ) { 1471 } else if (status & IM_ERCV_INT ) {
1475 PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT \n")); 1472 PRINTK((CARDNAME ": UNSUPPORTED: ERCV INTERRUPT\n"));
1476 outb( IM_ERCV_INT, ioaddr + INTERRUPT ); 1473 outb( IM_ERCV_INT, ioaddr + INTERRUPT );
1477 } 1474 }
1478 } while ( timeout -- ); 1475 } while ( timeout -- );
@@ -1482,7 +1479,7 @@ static irqreturn_t smc_interrupt(int irq, void * dev_id)
1482 SMC_SELECT_BANK( 2 ); 1479 SMC_SELECT_BANK( 2 );
1483 outb( mask, ioaddr + INT_MASK ); 1480 outb( mask, ioaddr + INT_MASK );
1484 1481
1485 PRINTK3(( KERN_WARNING CARDNAME ": MASK is now %x \n", mask )); 1482 PRINTK3((KERN_WARNING CARDNAME ": MASK is now %x\n", mask));
1486 outw( saved_pointer, ioaddr + POINTER ); 1483 outw( saved_pointer, ioaddr + POINTER );
1487 1484
1488 SMC_SELECT_BANK( saved_bank ); 1485 SMC_SELECT_BANK( saved_bank );
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 860339d51d58..682bc4fe604f 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1285,7 +1285,7 @@ static irqreturn_t smc_interrupt(int irq, void *dev_id)
1285 smc_phy_interrupt(dev); 1285 smc_phy_interrupt(dev);
1286 } else if (status & IM_ERCV_INT) { 1286 } else if (status & IM_ERCV_INT) {
1287 SMC_ACK_INT(lp, IM_ERCV_INT); 1287 SMC_ACK_INT(lp, IM_ERCV_INT);
1288 PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT \n", dev->name); 1288 PRINTK("%s: UNSUPPORTED: ERCV INTERRUPT\n", dev->name);
1289 } 1289 }
1290 } while (--timeout); 1290 } while (--timeout);
1291 1291
@@ -1412,7 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev)
1412 * within that register. 1412 * within that register.
1413 */ 1413 */
1414 else if (!netdev_mc_empty(dev)) { 1414 else if (!netdev_mc_empty(dev)) {
1415 struct dev_mc_list *cur_addr; 1415 struct netdev_hw_addr *ha;
1416 1416
1417 /* table for flipping the order of 3 bits */ 1417 /* table for flipping the order of 3 bits */
1418 static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7}; 1418 static const unsigned char invert3[] = {0, 4, 2, 6, 1, 5, 3, 7};
@@ -1420,16 +1420,16 @@ static void smc_set_multicast_list(struct net_device *dev)
1420 /* start with a table of all zeros: reject all */ 1420 /* start with a table of all zeros: reject all */
1421 memset(multicast_table, 0, sizeof(multicast_table)); 1421 memset(multicast_table, 0, sizeof(multicast_table));
1422 1422
1423 netdev_for_each_mc_addr(cur_addr, dev) { 1423 netdev_for_each_mc_addr(ha, dev) {
1424 int position; 1424 int position;
1425 1425
1426 /* make sure this is a multicast address - 1426 /* make sure this is a multicast address -
 1427 shouldn't this be a given if we have it here? */ 1427 shouldn't this be a given if we have it here? */
1428 if (!(*cur_addr->dmi_addr & 1)) 1428 if (!(*ha->addr & 1))
1429 continue; 1429 continue;
1430 1430
1431 /* only use the low order bits */ 1431 /* only use the low order bits */
1432 position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f; 1432 position = crc32_le(~0, ha->addr, 6) & 0x3f;
1433 1433
1434 /* do some messy swapping to put the bit in the right spot */ 1434 /* do some messy swapping to put the bit in the right spot */
1435 multicast_table[invert3[position&7]] |= 1435 multicast_table[invert3[position&7]] |=
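
Every hunk in this part of the series makes the same mechanical API conversion: the old struct dev_mc_list walker and its dmi_addr field give way to struct netdev_hw_addr and its addr field, iterated through netdev_for_each_mc_addr(). As a reading aid, here is a minimal userspace mock of the new iteration shape; the struct, macro, and device layout below are simplified stand-ins, not the kernel's real definitions.

	#include <stdio.h>

	/* Stand-in for struct netdev_hw_addr: only the 6-byte address
	 * matters to the drivers touched here. */
	struct mock_hw_addr {
		unsigned char addr[6];
		struct mock_hw_addr *next;
	};

	struct mock_dev {
		struct mock_hw_addr *mc_list;	/* multicast list */
	};

	/* Mirrors the shape of netdev_for_each_mc_addr(ha, dev). */
	#define mock_for_each_mc_addr(ha, dev) \
		for ((ha) = (dev)->mc_list; (ha); (ha) = (ha)->next)

	int main(void)
	{
		struct mock_hw_addr a = { {0x01, 0x00, 0x5e, 0x00, 0x00, 0x01}, NULL };
		struct mock_dev dev = { &a };
		struct mock_hw_addr *ha;

		mock_for_each_mc_addr(ha, &dev) {
			if (!(ha->addr[0] & 1))	/* skip unicast, as smc91x does */
				continue;
			printf("multicast %02x:%02x:...\n", ha->addr[0], ha->addr[1]);
		}
		return 0;
	}
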
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index cbf520d38eac..746fb91a0fb0 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -1382,13 +1382,13 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
1382 /* Enabling specific multicast addresses */ 1382 /* Enabling specific multicast addresses */
1383 unsigned int hash_high = 0; 1383 unsigned int hash_high = 0;
1384 unsigned int hash_low = 0; 1384 unsigned int hash_low = 0;
1385 struct dev_mc_list *mc_list; 1385 struct netdev_hw_addr *ha;
1386 1386
1387 pdata->set_bits_mask = MAC_CR_HPFILT_; 1387 pdata->set_bits_mask = MAC_CR_HPFILT_;
1388 pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_); 1388 pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
1389 1389
1390 netdev_for_each_mc_addr(mc_list, dev) { 1390 netdev_for_each_mc_addr(ha, dev) {
1391 unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr); 1391 unsigned int bitnum = smsc911x_hash(ha->addr);
1392 unsigned int mask = 0x01 << (bitnum & 0x1F); 1392 unsigned int mask = 0x01 << (bitnum & 0x1F);
1393 1393
1394 if (bitnum & 0x20) 1394 if (bitnum & 0x20)
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index aafaebf45748..ada05c452175 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -1064,12 +1064,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
1064 mac_cr |= MAC_CR_MCPAS_; 1064 mac_cr |= MAC_CR_MCPAS_;
1065 mac_cr &= (~MAC_CR_HPFILT_); 1065 mac_cr &= (~MAC_CR_HPFILT_);
1066 } else if (!netdev_mc_empty(dev)) { 1066 } else if (!netdev_mc_empty(dev)) {
1067 struct dev_mc_list *mc_list; 1067 struct netdev_hw_addr *ha;
1068 u32 hash_lo = 0, hash_hi = 0; 1068 u32 hash_lo = 0, hash_hi = 0;
1069 1069
1070 smsc_dbg(HW, "Multicast filter enabled"); 1070 smsc_dbg(HW, "Multicast filter enabled");
1071 netdev_for_each_mc_addr(mc_list, dev) { 1071 netdev_for_each_mc_addr(ha, dev) {
1072 u32 bit_num = smsc9420_hash(mc_list->dmi_addr); 1072 u32 bit_num = smsc9420_hash(ha->addr);
1073 u32 mask = 1 << (bit_num & 0x1F); 1073 u32 mask = 1 << (bit_num & 0x1F);
1074 1074
1075 if (bit_num & 0x20) 1075 if (bit_num & 0x20)
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 287c251075e5..e5d67327d707 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -531,7 +531,7 @@ static void sonic_multicast_list(struct net_device *dev)
531{ 531{
532 struct sonic_local *lp = netdev_priv(dev); 532 struct sonic_local *lp = netdev_priv(dev);
533 unsigned int rcr; 533 unsigned int rcr;
534 struct dev_mc_list *dmi; 534 struct netdev_hw_addr *ha;
535 unsigned char *addr; 535 unsigned char *addr;
536 int i; 536 int i;
537 537
@@ -550,8 +550,8 @@ static void sonic_multicast_list(struct net_device *dev)
550 netdev_mc_count(dev)); 550 netdev_mc_count(dev));
551 sonic_set_cam_enable(dev, 1); /* always enable our own address */ 551 sonic_set_cam_enable(dev, 1); /* always enable our own address */
552 i = 1; 552 i = 1;
553 netdev_for_each_mc_addr(dmi, dev) { 553 netdev_for_each_mc_addr(ha, dev) {
554 addr = dmi->dmi_addr; 554 addr = ha->addr;
555 sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]); 555 sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
556 sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]); 556 sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
557 sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]); 557 sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
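
The sonic hunk above keeps the CAM-loading logic intact and only renames the iterator: each 6-byte address is packed into three little-endian 16-bit CAM words. A standalone sketch of that packing, with the cap[] naming echoing SONIC_CD_CAP0..2:

	#include <stdio.h>

	/* Pack a 6-byte MAC address into three 16-bit CAM words, low byte
	 * first, exactly as the sonic_cda_put() calls above do. */
	static void pack_cam(const unsigned char *addr, unsigned short cap[3])
	{
		cap[0] = (unsigned short)(addr[1] << 8 | addr[0]);
		cap[1] = (unsigned short)(addr[3] << 8 | addr[2]);
		cap[2] = (unsigned short)(addr[5] << 8 | addr[4]);
	}

	int main(void)
	{
		const unsigned char mac[6] = {0x01, 0x00, 0x5e, 0x01, 0x02, 0x03};
		unsigned short cap[3];

		pack_cam(mac, cap);
		printf("CAP0=%04x CAP1=%04x CAP2=%04x\n", cap[0], cap[1], cap[2]);
		return 0;
	}
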
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index dd3cb0f2d21f..3dff280b438b 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -625,7 +625,7 @@ spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
625static void 625static void
626spider_net_set_multi(struct net_device *netdev) 626spider_net_set_multi(struct net_device *netdev)
627{ 627{
628 struct dev_mc_list *mc; 628 struct netdev_hw_addr *ha;
629 u8 hash; 629 u8 hash;
630 int i; 630 int i;
631 u32 reg; 631 u32 reg;
@@ -646,8 +646,8 @@ spider_net_set_multi(struct net_device *netdev)
646 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */ 646 hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
647 set_bit(0xfd, bitmask); 647 set_bit(0xfd, bitmask);
648 648
649 netdev_for_each_mc_addr(mc, netdev) { 649 netdev_for_each_mc_addr(ha, netdev) {
650 hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr); 650 hash = spider_net_get_multicast_hash(netdev, ha->addr);
651 set_bit(hash, bitmask); 651 set_bit(hash, bitmask);
652 } 652 }
653 653
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 6dfa69899019..8a6d27cdc0bd 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1766,7 +1766,7 @@ static void set_rx_mode(struct net_device *dev)
1766 struct netdev_private *np = netdev_priv(dev); 1766 struct netdev_private *np = netdev_priv(dev);
1767 void __iomem *ioaddr = np->base; 1767 void __iomem *ioaddr = np->base;
1768 u32 rx_mode = MinVLANPrio; 1768 u32 rx_mode = MinVLANPrio;
1769 struct dev_mc_list *mclist; 1769 struct netdev_hw_addr *ha;
1770 int i; 1770 int i;
1771#ifdef VLAN_SUPPORT 1771#ifdef VLAN_SUPPORT
1772 1772
@@ -1804,8 +1804,8 @@ static void set_rx_mode(struct net_device *dev)
1804 /* Use the 16 element perfect filter, skip first two entries. */ 1804 /* Use the 16 element perfect filter, skip first two entries. */
1805 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16; 1805 void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
1806 __be16 *eaddrs; 1806 __be16 *eaddrs;
1807 netdev_for_each_mc_addr(mclist, dev) { 1807 netdev_for_each_mc_addr(ha, dev) {
1808 eaddrs = (__be16 *)mclist->dmi_addr; 1808 eaddrs = (__be16 *) ha->addr;
1809 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4; 1809 writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
1810 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4; 1810 writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
1811 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8; 1811 writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
@@ -1825,10 +1825,10 @@ static void set_rx_mode(struct net_device *dev)
1825 __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */ 1825 __le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
1826 1826
1827 memset(mc_filter, 0, sizeof(mc_filter)); 1827 memset(mc_filter, 0, sizeof(mc_filter));
1828 netdev_for_each_mc_addr(mclist, dev) { 1828 netdev_for_each_mc_addr(ha, dev) {
1829 /* The chip uses the upper 9 CRC bits 1829 /* The chip uses the upper 9 CRC bits
1830 as index into the hash table */ 1830 as index into the hash table */
1831 int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23; 1831 int bit_nr = ether_crc_le(ETH_ALEN, ha->addr) >> 23;
1832 __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1]; 1832 __le32 *fptr = (__le32 *) &mc_filter[(bit_nr >> 4) & ~1];
1833 1833
1834 *fptr |= cpu_to_le32(1 << (bit_nr & 31)); 1834 *fptr |= cpu_to_le32(1 << (bit_nr & 31));
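
The starfire hash path above takes the upper 9 bits of the little-endian CRC as the bit index; its (bit_nr >> 4) & ~1 indexing of the __le16 array is equivalent to addressing 32-bit words by bit_nr >> 5. A standalone sketch of that bit placement follows; the CRC value is a stand-in, since the driver computes it with ether_crc_le():

	#include <stdint.h>
	#include <stdio.h>

	/* 512-bit multicast hash filter, viewed as 16 x 32-bit words. */
	static uint32_t mc_filter[16];

	/* Upper 9 CRC bits select one of 512 filter bits, as in
	 * ether_crc_le(ETH_ALEN, ha->addr) >> 23 above. */
	static void set_hash_bit(uint32_t crc)
	{
		unsigned int bit_nr = crc >> 23;	/* 0..511 */

		mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	}

	int main(void)
	{
		uint32_t crc = 0xdeadbeef;	/* stand-in CRC value */

		set_hash_bit(crc);
		printf("bit %u -> word %u = %08x\n", crc >> 23,
		       (crc >> 23) >> 5, mc_filter[(crc >> 23) >> 5]);
		return 0;
	}
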
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index c776af15fe1a..9691733ddb8e 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -2,4 +2,4 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o 2stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \ 3stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
4 dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \ 4 dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
5 dwmac100.o $(stmmac-y) 5 dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index 2a58172e986a..144f76fd3e39 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -22,8 +22,26 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include "descs.h"
26#include <linux/netdevice.h> 25#include <linux/netdevice.h>
26#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
27#define STMMAC_VLAN_TAG_USED
28#include <linux/if_vlan.h>
29#endif
30
31#include "descs.h"
32
33#undef CHIP_DEBUG_PRINT
34/* Turn on extra printk debug for the MAC core, DMA and descriptors */
35/* #define CHIP_DEBUG_PRINT */
36
37#ifdef CHIP_DEBUG_PRINT
38#define CHIP_DBG(fmt, args...) printk(fmt, ## args)
39#else
40#define CHIP_DBG(fmt, args...) do { } while (0)
41#endif
42
43#undef FRAME_FILTER_DEBUG
44/* #define FRAME_FILTER_DEBUG */
27 45
28struct stmmac_extra_stats { 46struct stmmac_extra_stats {
29 /* Transmit errors */ 47 /* Transmit errors */
@@ -231,3 +249,4 @@ extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
231 unsigned int high, unsigned int low); 249 unsigned int high, unsigned int low);
232extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr, 250extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
233 unsigned int high, unsigned int low); 251 unsigned int high, unsigned int low);
252extern void dwmac_dma_flush_tx_fifo(unsigned long ioaddr);
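
The CHIP_DBG macro introduced in the common.h hunk above follows the standard compile-away pattern: with CHIP_DEBUG_PRINT undefined it expands to an empty do { } while (0), which still parses as a single statement, so call sites inside if/else stay well-formed while the printk vanishes entirely. A minimal standalone illustration using the same gcc-style named variadic macro; the DEMO_* names are invented for the example:

	#include <stdio.h>

	/* #define DEMO_DEBUG */	/* uncomment to enable the messages */

	#ifdef DEMO_DEBUG
	#define DEMO_DBG(fmt, args...) printf(fmt, ## args)
	#else
	/* Swallows the call but remains one statement. */
	#define DEMO_DBG(fmt, args...) do { } while (0)
	#endif

	int main(void)
	{
		int status = 3;

		if (status)
			DEMO_DBG("status is %d\n", status);	/* compiled out */
		else
			printf("idle\n");
		return 0;
	}
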
diff --git a/drivers/net/stmmac/dwmac100.c b/drivers/net/stmmac/dwmac100.c
deleted file mode 100644
index 4cacca614fc1..000000000000
--- a/drivers/net/stmmac/dwmac100.c
+++ /dev/null
@@ -1,538 +0,0 @@
1/*******************************************************************************
2 This is the driver for the MAC 10/100 on-chip Ethernet controller
3 currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
4
5 DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
6 this code.
7
8 Copyright (C) 2007-2009 STMicroelectronics Ltd
9
10 This program is free software; you can redistribute it and/or modify it
11 under the terms and conditions of the GNU General Public License,
12 version 2, as published by the Free Software Foundation.
13
14 This program is distributed in the hope it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 more details.
18
19 You should have received a copy of the GNU General Public License along with
20 this program; if not, write to the Free Software Foundation, Inc.,
21 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22
23 The full GNU General Public License is included in this distribution in
24 the file called "COPYING".
25
26 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
27*******************************************************************************/
28
29#include <linux/crc32.h>
30#include <linux/mii.h>
31#include <linux/phy.h>
32#include <linux/slab.h>
33
34#include "common.h"
35#include "dwmac100.h"
36#include "dwmac_dma.h"
37
38#undef DWMAC100_DEBUG
39/*#define DWMAC100_DEBUG*/
40#ifdef DWMAC100_DEBUG
41#define DBG(fmt, args...) printk(fmt, ## args)
42#else
43#define DBG(fmt, args...) do { } while (0)
44#endif
45
46static void dwmac100_core_init(unsigned long ioaddr)
47{
48 u32 value = readl(ioaddr + MAC_CONTROL);
49
50 writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
51
52#ifdef STMMAC_VLAN_TAG_USED
53 writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
54#endif
55 return;
56}
57
58static void dwmac100_dump_mac_regs(unsigned long ioaddr)
59{
60 pr_info("\t----------------------------------------------\n"
61 "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
62 "\t----------------------------------------------\n",
63 (unsigned int)ioaddr);
64 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
65 readl(ioaddr + MAC_CONTROL));
66 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
67 readl(ioaddr + MAC_ADDR_HIGH));
68 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
69 readl(ioaddr + MAC_ADDR_LOW));
70 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
71 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
72 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
73 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
74 pr_info("\tflow control (offset 0x%x): 0x%08x\n",
75 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
76 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
77 readl(ioaddr + MAC_VLAN1));
78 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
79 readl(ioaddr + MAC_VLAN2));
80 pr_info("\n\tMAC management counter registers\n");
81 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
82 MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
83 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
84 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
85 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
86 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
87 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
88 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
89 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
90 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
91 return;
92}
93
94static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
95 u32 dma_rx)
96{
97 u32 value = readl(ioaddr + DMA_BUS_MODE);
98 /* DMA SW reset */
99 value |= DMA_BUS_MODE_SFT_RESET;
100 writel(value, ioaddr + DMA_BUS_MODE);
101 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
102
103 /* Enable Application Access by writing to DMA CSR0 */
104 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
105 ioaddr + DMA_BUS_MODE);
106
107 /* Mask interrupts by writing to CSR7 */
108 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
109
110 /* The base address of the RX/TX descriptor lists must be written into
111 * DMA CSR3 and CSR4, respectively. */
112 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
113 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
114
115 return 0;
116}
117
118/* Store and Forward capability is not used at all.
119 * The transmit threshold can be programmed by
120 * setting the TTC bits in the DMA control register.*/
121static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
122 int rxmode)
123{
124 u32 csr6 = readl(ioaddr + DMA_CONTROL);
125
126 if (txmode <= 32)
127 csr6 |= DMA_CONTROL_TTC_32;
128 else if (txmode <= 64)
129 csr6 |= DMA_CONTROL_TTC_64;
130 else
131 csr6 |= DMA_CONTROL_TTC_128;
132
133 writel(csr6, ioaddr + DMA_CONTROL);
134
135 return;
136}
137
138static void dwmac100_dump_dma_regs(unsigned long ioaddr)
139{
140 int i;
141
142 DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
143 for (i = 0; i < 9; i++)
144 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
145 (DMA_BUS_MODE + i * 4),
146 readl(ioaddr + DMA_BUS_MODE + i * 4));
147 DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
148 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
149 DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
150 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
151 return;
152}
153
154/* DMA controller has two counters to track the number of
155 * the receive missed frames. */
156static void dwmac100_dma_diagnostic_fr(void *data,
157 struct stmmac_extra_stats *x,
158 unsigned long ioaddr)
159{
160 struct net_device_stats *stats = (struct net_device_stats *)data;
161 u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
162
163 if (unlikely(csr8)) {
164 if (csr8 & DMA_MISSED_FRAME_OVE) {
165 stats->rx_over_errors += 0x800;
166 x->rx_overflow_cntr += 0x800;
167 } else {
168 unsigned int ove_cntr;
169 ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
170 stats->rx_over_errors += ove_cntr;
171 x->rx_overflow_cntr += ove_cntr;
172 }
173
174 if (csr8 & DMA_MISSED_FRAME_OVE_M) {
175 stats->rx_missed_errors += 0xffff;
176 x->rx_missed_cntr += 0xffff;
177 } else {
178 unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
179 stats->rx_missed_errors += miss_f;
180 x->rx_missed_cntr += miss_f;
181 }
182 }
183 return;
184}
185
186static int dwmac100_get_tx_frame_status(void *data,
187 struct stmmac_extra_stats *x,
188 struct dma_desc *p, unsigned long ioaddr)
189{
190 int ret = 0;
191 struct net_device_stats *stats = (struct net_device_stats *)data;
192
193 if (unlikely(p->des01.tx.error_summary)) {
194 if (unlikely(p->des01.tx.underflow_error)) {
195 x->tx_underflow++;
196 stats->tx_fifo_errors++;
197 }
198 if (unlikely(p->des01.tx.no_carrier)) {
199 x->tx_carrier++;
200 stats->tx_carrier_errors++;
201 }
202 if (unlikely(p->des01.tx.loss_carrier)) {
203 x->tx_losscarrier++;
204 stats->tx_carrier_errors++;
205 }
206 if (unlikely((p->des01.tx.excessive_deferral) ||
207 (p->des01.tx.excessive_collisions) ||
208 (p->des01.tx.late_collision)))
209 stats->collisions += p->des01.tx.collision_count;
210 ret = -1;
211 }
212 if (unlikely(p->des01.tx.heartbeat_fail)) {
213 x->tx_heartbeat++;
214 stats->tx_heartbeat_errors++;
215 ret = -1;
216 }
217 if (unlikely(p->des01.tx.deferred))
218 x->tx_deferred++;
219
220 return ret;
221}
222
223static int dwmac100_get_tx_len(struct dma_desc *p)
224{
225 return p->des01.tx.buffer1_size;
226}
227
228/* This function verifies if each incoming frame has some errors
229 * and, if required, updates the multicast statistics.
 230 * In case of success, it returns csum_none because the device
231 * is not able to compute the csum in HW. */
232static int dwmac100_get_rx_frame_status(void *data,
233 struct stmmac_extra_stats *x,
234 struct dma_desc *p)
235{
236 int ret = csum_none;
237 struct net_device_stats *stats = (struct net_device_stats *)data;
238
239 if (unlikely(p->des01.rx.last_descriptor == 0)) {
240 pr_warning("dwmac100 Error: Oversized Ethernet "
241 "frame spanned multiple buffers\n");
242 stats->rx_length_errors++;
243 return discard_frame;
244 }
245
246 if (unlikely(p->des01.rx.error_summary)) {
247 if (unlikely(p->des01.rx.descriptor_error))
248 x->rx_desc++;
249 if (unlikely(p->des01.rx.partial_frame_error))
250 x->rx_partial++;
251 if (unlikely(p->des01.rx.run_frame))
252 x->rx_runt++;
253 if (unlikely(p->des01.rx.frame_too_long))
254 x->rx_toolong++;
255 if (unlikely(p->des01.rx.collision)) {
256 x->rx_collision++;
257 stats->collisions++;
258 }
259 if (unlikely(p->des01.rx.crc_error)) {
260 x->rx_crc++;
261 stats->rx_crc_errors++;
262 }
263 ret = discard_frame;
264 }
265 if (unlikely(p->des01.rx.dribbling))
266 ret = discard_frame;
267
268 if (unlikely(p->des01.rx.length_error)) {
269 x->rx_length++;
270 ret = discard_frame;
271 }
272 if (unlikely(p->des01.rx.mii_error)) {
273 x->rx_mii++;
274 ret = discard_frame;
275 }
276 if (p->des01.rx.multicast_frame) {
277 x->rx_multicast++;
278 stats->multicast++;
279 }
280 return ret;
281}
282
283static void dwmac100_irq_status(unsigned long ioaddr)
284{
285 return;
286}
287
288static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
289 unsigned int reg_n)
290{
291 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
292}
293
294static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
295 unsigned int reg_n)
296{
297 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
298}
299
300static void dwmac100_set_filter(struct net_device *dev)
301{
302 unsigned long ioaddr = dev->base_addr;
303 u32 value = readl(ioaddr + MAC_CONTROL);
304
305 if (dev->flags & IFF_PROMISC) {
306 value |= MAC_CONTROL_PR;
307 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
308 MAC_CONTROL_HP);
309 } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
310 || (dev->flags & IFF_ALLMULTI)) {
311 value |= MAC_CONTROL_PM;
312 value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
313 writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
314 writel(0xffffffff, ioaddr + MAC_HASH_LOW);
315 } else if (netdev_mc_empty(dev)) { /* no multicast */
316 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
317 MAC_CONTROL_HO | MAC_CONTROL_HP);
318 } else {
319 u32 mc_filter[2];
320 struct dev_mc_list *mclist;
321
322 /* Perfect filter mode for physical address and Hash
323 filter for multicast */
324 value |= MAC_CONTROL_HP;
325 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
326 MAC_CONTROL_IF | MAC_CONTROL_HO);
327
328 memset(mc_filter, 0, sizeof(mc_filter));
329 netdev_for_each_mc_addr(mclist, dev) {
330 /* The upper 6 bits of the calculated CRC are used to
 331 * index the contents of the hash table */
332 int bit_nr =
333 ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
334 /* The most significant bit determines the register to
335 * use (H/L) while the other 5 bits determine the bit
336 * within the register. */
337 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
338 }
339 writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
340 writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
341 }
342
343 writel(value, ioaddr + MAC_CONTROL);
344
345 DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
346 "HI 0x%08x, LO 0x%08x\n",
347 __func__, readl(ioaddr + MAC_CONTROL),
348 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
349 return;
350}
351
352static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
353 unsigned int fc, unsigned int pause_time)
354{
355 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
356
357 if (duplex)
358 flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
359 writel(flow, ioaddr + MAC_FLOW_CTRL);
360
361 return;
362}
363
364/* No PMT module supported for this Ethernet Controller.
365 * Tested on ST platforms only.
366 */
367static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
368{
369 return;
370}
371
372static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
373 int disable_rx_ic)
374{
375 int i;
376 for (i = 0; i < ring_size; i++) {
377 p->des01.rx.own = 1;
378 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
379 if (i == ring_size - 1)
380 p->des01.rx.end_ring = 1;
381 if (disable_rx_ic)
382 p->des01.rx.disable_ic = 1;
383 p++;
384 }
385 return;
386}
387
388static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
389{
390 int i;
391 for (i = 0; i < ring_size; i++) {
392 p->des01.tx.own = 0;
393 if (i == ring_size - 1)
394 p->des01.tx.end_ring = 1;
395 p++;
396 }
397 return;
398}
399
400static int dwmac100_get_tx_owner(struct dma_desc *p)
401{
402 return p->des01.tx.own;
403}
404
405static int dwmac100_get_rx_owner(struct dma_desc *p)
406{
407 return p->des01.rx.own;
408}
409
410static void dwmac100_set_tx_owner(struct dma_desc *p)
411{
412 p->des01.tx.own = 1;
413}
414
415static void dwmac100_set_rx_owner(struct dma_desc *p)
416{
417 p->des01.rx.own = 1;
418}
419
420static int dwmac100_get_tx_ls(struct dma_desc *p)
421{
422 return p->des01.tx.last_segment;
423}
424
425static void dwmac100_release_tx_desc(struct dma_desc *p)
426{
427 int ter = p->des01.tx.end_ring;
428
429 /* clean field used within the xmit */
430 p->des01.tx.first_segment = 0;
431 p->des01.tx.last_segment = 0;
432 p->des01.tx.buffer1_size = 0;
433
434 /* clean status reported */
435 p->des01.tx.error_summary = 0;
436 p->des01.tx.underflow_error = 0;
437 p->des01.tx.no_carrier = 0;
438 p->des01.tx.loss_carrier = 0;
439 p->des01.tx.excessive_deferral = 0;
440 p->des01.tx.excessive_collisions = 0;
441 p->des01.tx.late_collision = 0;
442 p->des01.tx.heartbeat_fail = 0;
443 p->des01.tx.deferred = 0;
444
445 /* set termination field */
446 p->des01.tx.end_ring = ter;
447
448 return;
449}
450
451static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
452 int csum_flag)
453{
454 p->des01.tx.first_segment = is_fs;
455 p->des01.tx.buffer1_size = len;
456}
457
458static void dwmac100_clear_tx_ic(struct dma_desc *p)
459{
460 p->des01.tx.interrupt = 0;
461}
462
463static void dwmac100_close_tx_desc(struct dma_desc *p)
464{
465 p->des01.tx.last_segment = 1;
466 p->des01.tx.interrupt = 1;
467}
468
469static int dwmac100_get_rx_frame_len(struct dma_desc *p)
470{
471 return p->des01.rx.frame_length;
472}
473
474struct stmmac_ops dwmac100_ops = {
475 .core_init = dwmac100_core_init,
476 .dump_regs = dwmac100_dump_mac_regs,
477 .host_irq_status = dwmac100_irq_status,
478 .set_filter = dwmac100_set_filter,
479 .flow_ctrl = dwmac100_flow_ctrl,
480 .pmt = dwmac100_pmt,
481 .set_umac_addr = dwmac100_set_umac_addr,
482 .get_umac_addr = dwmac100_get_umac_addr,
483};
484
485struct stmmac_dma_ops dwmac100_dma_ops = {
486 .init = dwmac100_dma_init,
487 .dump_regs = dwmac100_dump_dma_regs,
488 .dma_mode = dwmac100_dma_operation_mode,
489 .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
490 .enable_dma_transmission = dwmac_enable_dma_transmission,
491 .enable_dma_irq = dwmac_enable_dma_irq,
492 .disable_dma_irq = dwmac_disable_dma_irq,
493 .start_tx = dwmac_dma_start_tx,
494 .stop_tx = dwmac_dma_stop_tx,
495 .start_rx = dwmac_dma_start_rx,
496 .stop_rx = dwmac_dma_stop_rx,
497 .dma_interrupt = dwmac_dma_interrupt,
498};
499
500struct stmmac_desc_ops dwmac100_desc_ops = {
501 .tx_status = dwmac100_get_tx_frame_status,
502 .rx_status = dwmac100_get_rx_frame_status,
503 .get_tx_len = dwmac100_get_tx_len,
504 .init_rx_desc = dwmac100_init_rx_desc,
505 .init_tx_desc = dwmac100_init_tx_desc,
506 .get_tx_owner = dwmac100_get_tx_owner,
507 .get_rx_owner = dwmac100_get_rx_owner,
508 .release_tx_desc = dwmac100_release_tx_desc,
509 .prepare_tx_desc = dwmac100_prepare_tx_desc,
510 .clear_tx_ic = dwmac100_clear_tx_ic,
511 .close_tx_desc = dwmac100_close_tx_desc,
512 .get_tx_ls = dwmac100_get_tx_ls,
513 .set_tx_owner = dwmac100_set_tx_owner,
514 .set_rx_owner = dwmac100_set_rx_owner,
515 .get_rx_frame_len = dwmac100_get_rx_frame_len,
516};
517
518struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
519{
520 struct mac_device_info *mac;
521
522 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
523
524 pr_info("\tDWMAC100\n");
525
526 mac->mac = &dwmac100_ops;
527 mac->desc = &dwmac100_desc_ops;
528 mac->dma = &dwmac100_dma_ops;
529
530 mac->pmt = PMT_NOT_SUPPORTED;
531 mac->link.port = MAC_CONTROL_PS;
532 mac->link.duplex = MAC_CONTROL_F;
533 mac->link.speed = 0;
534 mac->mii.addr = MAC_MII_ADDR;
535 mac->mii.data = MAC_MII_DATA;
536
537 return mac;
538}
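
The deletion above is a split, not a removal: the MAC-core half of dwmac100.c reappears below as dwmac100_core.c, the DMA half moves into the new dwmac100_dma.o, and the descriptor handlers migrate into the shared norm_desc.o named in the Makefile hunk. What holds the split together is the ops-table pattern visible in dwmac100_setup(): a mac_device_info carries pointers to per-chip tables of function pointers. A reduced standalone model of that pattern follows; the demo_* names are simplified stand-ins for the stmmac structures:

	#include <stdio.h>
	#include <stdlib.h>

	/* One table of function pointers per concern, chosen at setup. */
	struct demo_mac_ops {
		void (*core_init)(void);
	};

	struct demo_device {
		const struct demo_mac_ops *mac;
	};

	static void dwmac100_style_init(void)
	{
		printf("10/100 core init\n");
	}

	static const struct demo_mac_ops demo_dwmac100_ops = {
		.core_init = dwmac100_style_init,
	};

	/* Analogue of dwmac100_setup(): allocate the device and wire in
	 * the chip-specific table; callers only see dev->mac->core_init(). */
	static struct demo_device *demo_setup(void)
	{
		struct demo_device *dev = calloc(1, sizeof(*dev));

		if (dev)
			dev->mac = &demo_dwmac100_ops;
		return dev;
	}

	int main(void)
	{
		struct demo_device *dev = demo_setup();

		if (!dev)
			return 1;
		dev->mac->core_init();
		free(dev);
		return 0;
	}
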
diff --git a/drivers/net/stmmac/dwmac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004a..97956cbf1cb4 100644
--- a/drivers/net/stmmac/dwmac100.h
+++ b/drivers/net/stmmac/dwmac100.h
@@ -22,6 +22,9 @@
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/ 23*******************************************************************************/
24 24
25#include <linux/phy.h>
26#include "common.h"
27
25/*---------------------------------------------------------------------------- 28/*----------------------------------------------------------------------------
26 * MAC BLOCK defines 29 * MAC BLOCK defines
27 *---------------------------------------------------------------------------*/ 30 *---------------------------------------------------------------------------*/
@@ -114,3 +117,5 @@ enum ttc_control {
114#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */ 117#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
115#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */ 118#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
 116#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */ 119#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
120
121extern struct stmmac_dma_ops dwmac100_dma_ops;
diff --git a/drivers/net/stmmac/dwmac1000.h b/drivers/net/stmmac/dwmac1000.h
index 62dca0e384e7..d8d0f3553770 100644
--- a/drivers/net/stmmac/dwmac1000.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -172,7 +172,6 @@ enum rfd {
172 deac_full_minus_4 = 0x00401800, 172 deac_full_minus_4 = 0x00401800,
173}; 173};
174#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ 174#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
175#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
176 175
177enum ttc_control { 176enum ttc_control {
178 DMA_CONTROL_TTC_64 = 0x00000000, 177 DMA_CONTROL_TTC_64 = 0x00000000,
@@ -206,15 +205,4 @@ enum rtc_control {
206#define GMAC_MMC_TX_INTR 0x108 205#define GMAC_MMC_TX_INTR 0x108
207#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 206#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
208 207
209#undef DWMAC1000_DEBUG
210/* #define DWMAC1000__DEBUG */
211#undef FRAME_FILTER_DEBUG
212/* #define FRAME_FILTER_DEBUG */
213#ifdef DWMAC1000__DEBUG
214#define DBG(fmt, args...) printk(fmt, ## args)
215#else
216#define DBG(fmt, args...) do { } while (0)
217#endif
218
219extern struct stmmac_dma_ops dwmac1000_dma_ops; 208extern struct stmmac_dma_ops dwmac1000_dma_ops;
220extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
index 5bd95ebfe498..0aa89ae9b8e9 100644
--- a/drivers/net/stmmac/dwmac1000_core.c
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -83,8 +83,8 @@ static void dwmac1000_set_filter(struct net_device *dev)
83 unsigned long ioaddr = dev->base_addr; 83 unsigned long ioaddr = dev->base_addr;
84 unsigned int value = 0; 84 unsigned int value = 0;
85 85
86 DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n", 86 CHIP_DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
87 __func__, netdev_mc_count(dev), netdev_uc_count(dev)); 87 __func__, netdev_mc_count(dev), netdev_uc_count(dev));
88 88
89 if (dev->flags & IFF_PROMISC) 89 if (dev->flags & IFF_PROMISC)
90 value = GMAC_FRAME_FILTER_PR; 90 value = GMAC_FRAME_FILTER_PR;
@@ -95,17 +95,17 @@ static void dwmac1000_set_filter(struct net_device *dev)
95 writel(0xffffffff, ioaddr + GMAC_HASH_LOW); 95 writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
96 } else if (!netdev_mc_empty(dev)) { 96 } else if (!netdev_mc_empty(dev)) {
97 u32 mc_filter[2]; 97 u32 mc_filter[2];
98 struct dev_mc_list *mclist; 98 struct netdev_hw_addr *ha;
99 99
100 /* Hash filter for multicast */ 100 /* Hash filter for multicast */
101 value = GMAC_FRAME_FILTER_HMC; 101 value = GMAC_FRAME_FILTER_HMC;
102 102
103 memset(mc_filter, 0, sizeof(mc_filter)); 103 memset(mc_filter, 0, sizeof(mc_filter));
104 netdev_for_each_mc_addr(mclist, dev) { 104 netdev_for_each_mc_addr(ha, dev) {
105 /* The upper 6 bits of the calculated CRC are used to 105 /* The upper 6 bits of the calculated CRC are used to
 106 index the contents of the hash table 106 index the contents of the hash table
107 int bit_nr = 107 int bit_nr =
108 bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26; 108 bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
109 /* The most significant bit determines the register to 109 /* The most significant bit determines the register to
110 * use (H/L) while the other 5 bits determine the bit 110 * use (H/L) while the other 5 bits determine the bit
111 * within the register. */ 111 * within the register. */
@@ -136,7 +136,7 @@ static void dwmac1000_set_filter(struct net_device *dev)
136#endif 136#endif
137 writel(value, ioaddr + GMAC_FRAME_FILTER); 137 writel(value, ioaddr + GMAC_FRAME_FILTER);
138 138
139 DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: " 139 CHIP_DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
140 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER), 140 "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
141 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW)); 141 readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
142 142
@@ -148,18 +148,18 @@ static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
148{ 148{
149 unsigned int flow = 0; 149 unsigned int flow = 0;
150 150
151 DBG(KERN_DEBUG "GMAC Flow-Control:\n"); 151 CHIP_DBG(KERN_DEBUG "GMAC Flow-Control:\n");
152 if (fc & FLOW_RX) { 152 if (fc & FLOW_RX) {
153 DBG(KERN_DEBUG "\tReceive Flow-Control ON\n"); 153 CHIP_DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
154 flow |= GMAC_FLOW_CTRL_RFE; 154 flow |= GMAC_FLOW_CTRL_RFE;
155 } 155 }
156 if (fc & FLOW_TX) { 156 if (fc & FLOW_TX) {
157 DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n"); 157 CHIP_DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
158 flow |= GMAC_FLOW_CTRL_TFE; 158 flow |= GMAC_FLOW_CTRL_TFE;
159 } 159 }
160 160
161 if (duplex) { 161 if (duplex) {
162 DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time); 162 CHIP_DBG(KERN_DEBUG "\tduplex mode: PAUSE %d\n", pause_time);
163 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT); 163 flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
164 } 164 }
165 165
@@ -172,10 +172,10 @@ static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
172 unsigned int pmt = 0; 172 unsigned int pmt = 0;
173 173
174 if (mode == WAKE_MAGIC) { 174 if (mode == WAKE_MAGIC) {
175 DBG(KERN_DEBUG "GMAC: WOL Magic frame\n"); 175 CHIP_DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
176 pmt |= power_down | magic_pkt_en; 176 pmt |= power_down | magic_pkt_en;
177 } else if (mode == WAKE_UCAST) { 177 } else if (mode == WAKE_UCAST) {
178 DBG(KERN_DEBUG "GMAC: WOL on global unicast\n"); 178 CHIP_DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
179 pmt |= global_unicast; 179 pmt |= global_unicast;
180 } 180 }
181 181
@@ -190,16 +190,16 @@ static void dwmac1000_irq_status(unsigned long ioaddr)
190 190
191 /* Not used events (e.g. MMC interrupts) are not handled. */ 191 /* Not used events (e.g. MMC interrupts) are not handled. */
192 if ((intr_status & mmc_tx_irq)) 192 if ((intr_status & mmc_tx_irq))
193 DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n", 193 CHIP_DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
194 readl(ioaddr + GMAC_MMC_TX_INTR)); 194 readl(ioaddr + GMAC_MMC_TX_INTR));
195 if (unlikely(intr_status & mmc_rx_irq)) 195 if (unlikely(intr_status & mmc_rx_irq))
196 DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n", 196 CHIP_DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
197 readl(ioaddr + GMAC_MMC_RX_INTR)); 197 readl(ioaddr + GMAC_MMC_RX_INTR));
198 if (unlikely(intr_status & mmc_rx_csum_offload_irq)) 198 if (unlikely(intr_status & mmc_rx_csum_offload_irq))
199 DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n", 199 CHIP_DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
200 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD)); 200 readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
201 if (unlikely(intr_status & pmt_irq)) { 201 if (unlikely(intr_status & pmt_irq)) {
202 DBG(KERN_DEBUG "GMAC: received Magic frame\n"); 202 CHIP_DBG(KERN_DEBUG "GMAC: received Magic frame\n");
203 /* clear the PMT bits 5 and 6 by reading the PMT 203 /* clear the PMT bits 5 and 6 by reading the PMT
204 * status register. */ 204 * status register. */
205 readl(ioaddr + GMAC_PMT); 205 readl(ioaddr + GMAC_PMT);
@@ -230,7 +230,6 @@ struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
230 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL); 230 mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
231 231
232 mac->mac = &dwmac1000_ops; 232 mac->mac = &dwmac1000_ops;
233 mac->desc = &dwmac1000_desc_ops;
234 mac->dma = &dwmac1000_dma_ops; 233 mac->dma = &dwmac1000_dma_ops;
235 234
236 mac->pmt = PMT_SUPPORTED; 235 mac->pmt = PMT_SUPPORTED;
diff --git a/drivers/net/stmmac/dwmac1000_dma.c b/drivers/net/stmmac/dwmac1000_dma.c
index 39d436a2da68..a547aa99e114 100644
--- a/drivers/net/stmmac/dwmac1000_dma.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,7 +3,7 @@
3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for 3 DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
4 developing this code. 4 developing this code.
5 5
6 This contains the functions to handle the dma and descriptors. 6 This contains the functions to handle the dma.
7 7
8 Copyright (C) 2007-2009 STMicroelectronics Ltd 8 Copyright (C) 2007-2009 STMicroelectronics Ltd
9 9
@@ -58,29 +58,20 @@ static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
58 return 0; 58 return 0;
59} 59}
60 60
61/* Transmit FIFO flush operation */
62static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
63{
64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
65 writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
66
67 do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
68}
69
70static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode, 61static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
71 int rxmode) 62 int rxmode)
72{ 63{
73 u32 csr6 = readl(ioaddr + DMA_CONTROL); 64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
74 65
75 if (txmode == SF_DMA_MODE) { 66 if (txmode == SF_DMA_MODE) {
76 DBG(KERN_DEBUG "GMAC: enabling TX store and forward mode\n"); 67 CHIP_DBG(KERN_DEBUG "GMAC: enable TX store and forward mode\n");
77 /* Transmit COE type 2 cannot be done in cut-through mode. */ 68 /* Transmit COE type 2 cannot be done in cut-through mode. */
78 csr6 |= DMA_CONTROL_TSF; 69 csr6 |= DMA_CONTROL_TSF;
 79 /* Operating on second frame increases the performance 70 /* Operating on second frame increases the performance
80 * especially when transmit store-and-forward is used.*/ 71 * especially when transmit store-and-forward is used.*/
81 csr6 |= DMA_CONTROL_OSF; 72 csr6 |= DMA_CONTROL_OSF;
82 } else { 73 } else {
83 DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode" 74 CHIP_DBG(KERN_DEBUG "GMAC: disabling TX store and forward mode"
84 " (threshold = %d)\n", txmode); 75 " (threshold = %d)\n", txmode);
85 csr6 &= ~DMA_CONTROL_TSF; 76 csr6 &= ~DMA_CONTROL_TSF;
86 csr6 &= DMA_CONTROL_TC_TX_MASK; 77 csr6 &= DMA_CONTROL_TC_TX_MASK;
@@ -98,10 +89,10 @@ static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
98 } 89 }
99 90
100 if (rxmode == SF_DMA_MODE) { 91 if (rxmode == SF_DMA_MODE) {
101 DBG(KERN_DEBUG "GMAC: enabling RX store and forward mode\n"); 92 CHIP_DBG(KERN_DEBUG "GMAC: enable RX store and forward mode\n");
102 csr6 |= DMA_CONTROL_RSF; 93 csr6 |= DMA_CONTROL_RSF;
103 } else { 94 } else {
104 DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode" 95 CHIP_DBG(KERN_DEBUG "GMAC: disabling RX store and forward mode"
105 " (threshold = %d)\n", rxmode); 96 " (threshold = %d)\n", rxmode);
106 csr6 &= ~DMA_CONTROL_RSF; 97 csr6 &= ~DMA_CONTROL_RSF;
107 csr6 &= DMA_CONTROL_TC_RX_MASK; 98 csr6 &= DMA_CONTROL_TC_RX_MASK;
@@ -141,305 +132,6 @@ static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
141 return; 132 return;
142} 133}
143 134
144static int dwmac1000_get_tx_frame_status(void *data,
145 struct stmmac_extra_stats *x,
146 struct dma_desc *p, unsigned long ioaddr)
147{
148 int ret = 0;
149 struct net_device_stats *stats = (struct net_device_stats *)data;
150
151 if (unlikely(p->des01.etx.error_summary)) {
152 DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
153 if (unlikely(p->des01.etx.jabber_timeout)) {
154 DBG(KERN_ERR "\tjabber_timeout error\n");
155 x->tx_jabber++;
156 }
157
158 if (unlikely(p->des01.etx.frame_flushed)) {
159 DBG(KERN_ERR "\tframe_flushed error\n");
160 x->tx_frame_flushed++;
161 dwmac1000_flush_tx_fifo(ioaddr);
162 }
163
164 if (unlikely(p->des01.etx.loss_carrier)) {
165 DBG(KERN_ERR "\tloss_carrier error\n");
166 x->tx_losscarrier++;
167 stats->tx_carrier_errors++;
168 }
169 if (unlikely(p->des01.etx.no_carrier)) {
170 DBG(KERN_ERR "\tno_carrier error\n");
171 x->tx_carrier++;
172 stats->tx_carrier_errors++;
173 }
174 if (unlikely(p->des01.etx.late_collision)) {
175 DBG(KERN_ERR "\tlate_collision error\n");
176 stats->collisions += p->des01.etx.collision_count;
177 }
178 if (unlikely(p->des01.etx.excessive_collisions)) {
179 DBG(KERN_ERR "\texcessive_collisions\n");
180 stats->collisions += p->des01.etx.collision_count;
181 }
182 if (unlikely(p->des01.etx.excessive_deferral)) {
183 DBG(KERN_INFO "\texcessive tx_deferral\n");
184 x->tx_deferred++;
185 }
186
187 if (unlikely(p->des01.etx.underflow_error)) {
188 DBG(KERN_ERR "\tunderflow error\n");
189 dwmac1000_flush_tx_fifo(ioaddr);
190 x->tx_underflow++;
191 }
192
193 if (unlikely(p->des01.etx.ip_header_error)) {
194 DBG(KERN_ERR "\tTX IP header csum error\n");
195 x->tx_ip_header_error++;
196 }
197
198 if (unlikely(p->des01.etx.payload_error)) {
199 DBG(KERN_ERR "\tAddr/Payload csum error\n");
200 x->tx_payload_error++;
201 dwmac1000_flush_tx_fifo(ioaddr);
202 }
203
204 ret = -1;
205 }
206
207 if (unlikely(p->des01.etx.deferred)) {
208 DBG(KERN_INFO "GMAC TX status: tx deferred\n");
209 x->tx_deferred++;
210 }
211#ifdef STMMAC_VLAN_TAG_USED
212 if (p->des01.etx.vlan_frame) {
213 DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
214 x->tx_vlan++;
215 }
216#endif
217
218 return ret;
219}
220
221static int dwmac1000_get_tx_len(struct dma_desc *p)
222{
223 return p->des01.etx.buffer1_size;
224}
225
226static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
227{
228 int ret = good_frame;
229 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
230
231 /* bits 5 7 0 | Frame status
232 * ----------------------------------------------------------
 233 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
 234 * 1 0 0 | IPv4/6 No CSUM errors.
 235 * 1 0 1 | IPv4/6 CSUM PAYLOAD error
 236 * 1 1 0 | IPv4/6 CSUM IP HR error
 237 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
238 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
239 * 0 1 1 | COE bypassed.. no IPv4/6 frame
240 * 0 1 0 | Reserved.
241 */
242 if (status == 0x0) {
243 DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
244 ret = good_frame;
245 } else if (status == 0x4) {
246 DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
247 ret = good_frame;
248 } else if (status == 0x5) {
249 DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
250 ret = csum_none;
251 } else if (status == 0x6) {
252 DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
253 ret = csum_none;
254 } else if (status == 0x7) {
255 DBG(KERN_ERR
256 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
257 ret = csum_none;
258 } else if (status == 0x1) {
259 DBG(KERN_ERR
260 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
261 ret = discard_frame;
262 } else if (status == 0x3) {
263 DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
264 ret = discard_frame;
265 }
266 return ret;
267}
268
269static int dwmac1000_get_rx_frame_status(void *data,
270 struct stmmac_extra_stats *x, struct dma_desc *p)
271{
272 int ret = good_frame;
273 struct net_device_stats *stats = (struct net_device_stats *)data;
274
275 if (unlikely(p->des01.erx.error_summary)) {
276 DBG(KERN_ERR "GMAC RX Error Summary... 0x%08x\n", p->des01.erx);
277 if (unlikely(p->des01.erx.descriptor_error)) {
278 DBG(KERN_ERR "\tdescriptor error\n");
279 x->rx_desc++;
280 stats->rx_length_errors++;
281 }
282 if (unlikely(p->des01.erx.overflow_error)) {
283 DBG(KERN_ERR "\toverflow error\n");
284 x->rx_gmac_overflow++;
285 }
286
287 if (unlikely(p->des01.erx.ipc_csum_error))
288 DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
289
290 if (unlikely(p->des01.erx.late_collision)) {
291 DBG(KERN_ERR "\tlate_collision error\n");
292 stats->collisions++;
293 stats->collisions++;
294 }
295 if (unlikely(p->des01.erx.receive_watchdog)) {
296 DBG(KERN_ERR "\treceive_watchdog error\n");
297 x->rx_watchdog++;
298 }
299 if (unlikely(p->des01.erx.error_gmii)) {
300 DBG(KERN_ERR "\tReceive Error\n");
301 x->rx_mii++;
302 }
303 if (unlikely(p->des01.erx.crc_error)) {
304 DBG(KERN_ERR "\tCRC error\n");
305 x->rx_crc++;
306 stats->rx_crc_errors++;
307 }
308 ret = discard_frame;
309 }
310
311 /* After a payload csum error, the ES bit is set.
 312 * It does not match the information reported in the databook.
313 * At any rate, we need to understand if the CSUM hw computation is ok
314 * and report this info to the upper layers. */
315 ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
316 p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
317
318 if (unlikely(p->des01.erx.dribbling)) {
319 DBG(KERN_ERR "GMAC RX: dribbling error\n");
320 ret = discard_frame;
321 }
322 if (unlikely(p->des01.erx.sa_filter_fail)) {
323 DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
324 x->sa_rx_filter_fail++;
325 ret = discard_frame;
326 }
327 if (unlikely(p->des01.erx.da_filter_fail)) {
328 DBG(KERN_ERR "GMAC RX : Destination Address filter fail\n");
329 x->da_rx_filter_fail++;
330 ret = discard_frame;
331 }
332 if (unlikely(p->des01.erx.length_error)) {
333 DBG(KERN_ERR "GMAC RX: length_error error\n");
334 x->rx_length++;
335 ret = discard_frame;
336 }
337#ifdef STMMAC_VLAN_TAG_USED
338 if (p->des01.erx.vlan_tag) {
339 DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
340 x->rx_vlan++;
341 }
342#endif
343 return ret;
344}
345
346static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
347 int disable_rx_ic)
348{
349 int i;
350 for (i = 0; i < ring_size; i++) {
351 p->des01.erx.own = 1;
352 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
353 /* To support jumbo frames */
354 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
355 if (i == ring_size - 1)
356 p->des01.erx.end_ring = 1;
357 if (disable_rx_ic)
358 p->des01.erx.disable_ic = 1;
359 p++;
360 }
361 return;
362}
363
364static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
365{
366 int i;
367
368 for (i = 0; i < ring_size; i++) {
369 p->des01.etx.own = 0;
370 if (i == ring_size - 1)
371 p->des01.etx.end_ring = 1;
372 p++;
373 }
374
375 return;
376}
377
378static int dwmac1000_get_tx_owner(struct dma_desc *p)
379{
380 return p->des01.etx.own;
381}
382
383static int dwmac1000_get_rx_owner(struct dma_desc *p)
384{
385 return p->des01.erx.own;
386}
387
388static void dwmac1000_set_tx_owner(struct dma_desc *p)
389{
390 p->des01.etx.own = 1;
391}
392
393static void dwmac1000_set_rx_owner(struct dma_desc *p)
394{
395 p->des01.erx.own = 1;
396}
397
398static int dwmac1000_get_tx_ls(struct dma_desc *p)
399{
400 return p->des01.etx.last_segment;
401}
402
403static void dwmac1000_release_tx_desc(struct dma_desc *p)
404{
405 int ter = p->des01.etx.end_ring;
406
407 memset(p, 0, sizeof(struct dma_desc));
408 p->des01.etx.end_ring = ter;
409
410 return;
411}
412
413static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
414 int csum_flag)
415{
416 p->des01.etx.first_segment = is_fs;
417 if (unlikely(len > BUF_SIZE_4KiB)) {
418 p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
419 p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
420 } else {
421 p->des01.etx.buffer1_size = len;
422 }
423 if (likely(csum_flag))
424 p->des01.etx.checksum_insertion = cic_full;
425}
426
427static void dwmac1000_clear_tx_ic(struct dma_desc *p)
428{
429 p->des01.etx.interrupt = 0;
430}
431
432static void dwmac1000_close_tx_desc(struct dma_desc *p)
433{
434 p->des01.etx.last_segment = 1;
435 p->des01.etx.interrupt = 1;
436}
437
438static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
439{
440 return p->des01.erx.frame_length;
441}
442
443struct stmmac_dma_ops dwmac1000_dma_ops = { 135struct stmmac_dma_ops dwmac1000_dma_ops = {
444 .init = dwmac1000_dma_init, 136 .init = dwmac1000_dma_init,
445 .dump_regs = dwmac1000_dump_dma_regs, 137 .dump_regs = dwmac1000_dump_dma_regs,
@@ -454,21 +146,3 @@ struct stmmac_dma_ops dwmac1000_dma_ops = {
454 .stop_rx = dwmac_dma_stop_rx, 146 .stop_rx = dwmac_dma_stop_rx,
455 .dma_interrupt = dwmac_dma_interrupt, 147 .dma_interrupt = dwmac_dma_interrupt,
456}; 148};
457
458struct stmmac_desc_ops dwmac1000_desc_ops = {
459 .tx_status = dwmac1000_get_tx_frame_status,
460 .rx_status = dwmac1000_get_rx_frame_status,
461 .get_tx_len = dwmac1000_get_tx_len,
462 .init_rx_desc = dwmac1000_init_rx_desc,
463 .init_tx_desc = dwmac1000_init_tx_desc,
464 .get_tx_owner = dwmac1000_get_tx_owner,
465 .get_rx_owner = dwmac1000_get_rx_owner,
466 .release_tx_desc = dwmac1000_release_tx_desc,
467 .prepare_tx_desc = dwmac1000_prepare_tx_desc,
468 .clear_tx_ic = dwmac1000_clear_tx_ic,
469 .close_tx_desc = dwmac1000_close_tx_desc,
470 .get_tx_ls = dwmac1000_get_tx_ls,
471 .set_tx_owner = dwmac1000_set_tx_owner,
472 .set_rx_owner = dwmac1000_set_rx_owner,
473 .get_rx_frame_len = dwmac1000_get_rx_frame_len,
474};
diff --git a/drivers/net/stmmac/dwmac100_core.c b/drivers/net/stmmac/dwmac100_core.c
new file mode 100644
index 000000000000..fab14a4cb14c
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_core.c
@@ -0,0 +1,201 @@
1/*******************************************************************************
2 This is the driver for the MAC 10/100 on-chip Ethernet controller
3 currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
4
5 DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
6 this code.
7
8 This only implements the mac core functions for this chip.
9
10 Copyright (C) 2007-2009 STMicroelectronics Ltd
11
12 This program is free software; you can redistribute it and/or modify it
13 under the terms and conditions of the GNU General Public License,
14 version 2, as published by the Free Software Foundation.
15
16 This program is distributed in the hope it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 more details.
20
21 You should have received a copy of the GNU General Public License along with
22 this program; if not, write to the Free Software Foundation, Inc.,
23 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24
25 The full GNU General Public License is included in this distribution in
26 the file called "COPYING".
27
28 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
29*******************************************************************************/
30
31#include <linux/crc32.h>
32#include "dwmac100.h"
33
34static void dwmac100_core_init(unsigned long ioaddr)
35{
36 u32 value = readl(ioaddr + MAC_CONTROL);
37
38 writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
39
40#ifdef STMMAC_VLAN_TAG_USED
41 writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
42#endif
43 return;
44}
45
46static void dwmac100_dump_mac_regs(unsigned long ioaddr)
47{
48 pr_info("\t----------------------------------------------\n"
49 "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
50 "\t----------------------------------------------\n",
51 (unsigned int)ioaddr);
52 pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
53 readl(ioaddr + MAC_CONTROL));
54 pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
55 readl(ioaddr + MAC_ADDR_HIGH));
56 pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
57 readl(ioaddr + MAC_ADDR_LOW));
58 pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
59 MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
60 pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
61 MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
62 pr_info("\tflow control (offset 0x%x): 0x%08x\n",
63 MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
64 pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
65 readl(ioaddr + MAC_VLAN1));
66 pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
67 readl(ioaddr + MAC_VLAN2));
68 pr_info("\n\tMAC management counter registers\n");
69 pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
70 MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
71 pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
72 MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
73 pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
74 MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
75 pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
76 MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
77 pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
78 MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
79 return;
80}
81
82static void dwmac100_irq_status(unsigned long ioaddr)
83{
84 return;
85}
86
87static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
88 unsigned int reg_n)
89{
90 stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
91}
92
93static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
94 unsigned int reg_n)
95{
96 stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
97}
98
99static void dwmac100_set_filter(struct net_device *dev)
100{
101 unsigned long ioaddr = dev->base_addr;
102 u32 value = readl(ioaddr + MAC_CONTROL);
103
104 if (dev->flags & IFF_PROMISC) {
105 value |= MAC_CONTROL_PR;
106 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
107 MAC_CONTROL_HP);
108 } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
109 || (dev->flags & IFF_ALLMULTI)) {
110 value |= MAC_CONTROL_PM;
111 value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
112 writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
113 writel(0xffffffff, ioaddr + MAC_HASH_LOW);
114 } else if (netdev_mc_empty(dev)) { /* no multicast */
115 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
116 MAC_CONTROL_HO | MAC_CONTROL_HP);
117 } else {
118 u32 mc_filter[2];
119 struct netdev_hw_addr *ha;
120
121 /* Perfect filter mode for physical address and Hash
122 filter for multicast */
123 value |= MAC_CONTROL_HP;
124 value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
125 MAC_CONTROL_IF | MAC_CONTROL_HO);
126
127 memset(mc_filter, 0, sizeof(mc_filter));
128 netdev_for_each_mc_addr(ha, dev) {
129 /* The upper 6 bits of the calculated CRC are used to
130			 * index the contents of the hash table */
131 int bit_nr =
132 ether_crc(ETH_ALEN, ha->addr) >> 26;
133 /* The most significant bit determines the register to
134 * use (H/L) while the other 5 bits determine the bit
135 * within the register. */
136 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
137 }
138 writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
139 writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
140 }
141
142 writel(value, ioaddr + MAC_CONTROL);
143
144 CHIP_DBG(KERN_INFO "%s: CTRL reg: 0x%08x Hash regs: "
145 "HI 0x%08x, LO 0x%08x\n",
146 __func__, readl(ioaddr + MAC_CONTROL),
147 readl(ioaddr + MAC_HASH_HIGH), readl(ioaddr + MAC_HASH_LOW));
148 return;
149}
150
151static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
152 unsigned int fc, unsigned int pause_time)
153{
154 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
155
156 if (duplex)
157 flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
158 writel(flow, ioaddr + MAC_FLOW_CTRL);
159
160 return;
161}
162
163/* No PMT module supported for this Ethernet Controller.
164 * Tested on ST platforms only.
165 */
166static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
167{
168 return;
169}
170
171struct stmmac_ops dwmac100_ops = {
172 .core_init = dwmac100_core_init,
173 .dump_regs = dwmac100_dump_mac_regs,
174 .host_irq_status = dwmac100_irq_status,
175 .set_filter = dwmac100_set_filter,
176 .flow_ctrl = dwmac100_flow_ctrl,
177 .pmt = dwmac100_pmt,
178 .set_umac_addr = dwmac100_set_umac_addr,
179 .get_umac_addr = dwmac100_get_umac_addr,
180};
181
182struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
183{
184 struct mac_device_info *mac;
185
186	mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
	if (!mac)		/* don't dereference a failed allocation below */
		return NULL;
187
188 pr_info("\tDWMAC100\n");
189
190 mac->mac = &dwmac100_ops;
191 mac->dma = &dwmac100_dma_ops;
192
193 mac->pmt = PMT_NOT_SUPPORTED;
194 mac->link.port = MAC_CONTROL_PS;
195 mac->link.duplex = MAC_CONTROL_F;
196 mac->link.speed = 0;
197 mac->mii.addr = MAC_MII_ADDR;
198 mac->mii.data = MAC_MII_DATA;
199
200 return mac;
201}
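The set_filter path above condenses each multicast address into one bit of a 64-bit hash split across the HASH_HIGH/HASH_LOW registers: the top six bits of the Ethernet CRC pick the register (bit 5) and the bit within it (bits 4..0). A minimal standalone sketch of that computation, with a CRC routine mirroring the semantics of the kernel's ether_crc() (the sample address is an illustrative input, not taken from the driver):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's ether_crc(): CRC-32, polynomial 0x04c11db7,
 * MSB-first register, address bytes consumed LSB-first. */
static uint32_t ether_crc_sketch(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (len-- > 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ octet) & 1) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	uint32_t mc_filter[2] = { 0, 0 };	/* LO, HI register images */
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	/* Upper 6 CRC bits index the 64-bit table: bit 5 selects the
	 * HI/LO register, bits 4..0 the bit within it. */
	int bit_nr = ether_crc_sketch(6, addr) >> 26;

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);
	printf("CRC bit %d -> MAC_HASH_%s\n", bit_nr,
	       (bit_nr >> 5) ? "HIGH" : "LOW");
	return 0;
}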
diff --git a/drivers/net/stmmac/dwmac100_dma.c b/drivers/net/stmmac/dwmac100_dma.c
new file mode 100644
index 000000000000..96d098d68ad6
--- /dev/null
+++ b/drivers/net/stmmac/dwmac100_dma.c
@@ -0,0 +1,138 @@
1/*******************************************************************************
2 This is the driver for the MAC 10/100 on-chip Ethernet controller
3 currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
4
5 DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
6 this code.
7
8 This contains the functions to handle the dma.
9
10 Copyright (C) 2007-2009 STMicroelectronics Ltd
11
12 This program is free software; you can redistribute it and/or modify it
13 under the terms and conditions of the GNU General Public License,
14 version 2, as published by the Free Software Foundation.
15
16 This program is distributed in the hope it will be useful, but WITHOUT
17 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
19 more details.
20
21 You should have received a copy of the GNU General Public License along with
22 this program; if not, write to the Free Software Foundation, Inc.,
23 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
24
25 The full GNU General Public License is included in this distribution in
26 the file called "COPYING".
27
28 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
29*******************************************************************************/
30
31#include "dwmac100.h"
32#include "dwmac_dma.h"
33
34static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
35 u32 dma_rx)
36{
37 u32 value = readl(ioaddr + DMA_BUS_MODE);
38 /* DMA SW reset */
39 value |= DMA_BUS_MODE_SFT_RESET;
40 writel(value, ioaddr + DMA_BUS_MODE);
41 do {} while ((readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET));
42
43 /* Enable Application Access by writing to DMA CSR0 */
44 writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
45 ioaddr + DMA_BUS_MODE);
46
47 /* Mask interrupts by writing to CSR7 */
48 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
49
50 /* The base address of the RX/TX descriptor lists must be written into
51 * DMA CSR3 and CSR4, respectively. */
52 writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
53 writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
54
55 return 0;
56}
57
58/* Store and Forward capability is not used at all..
59 * The transmit threshold can be programmed by
60 * setting the TTC bits in the DMA control register.*/
61static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
62 int rxmode)
63{
64 u32 csr6 = readl(ioaddr + DMA_CONTROL);
65
66 if (txmode <= 32)
67 csr6 |= DMA_CONTROL_TTC_32;
68 else if (txmode <= 64)
69 csr6 |= DMA_CONTROL_TTC_64;
70 else
71 csr6 |= DMA_CONTROL_TTC_128;
72
73 writel(csr6, ioaddr + DMA_CONTROL);
74
75 return;
76}
77
78static void dwmac100_dump_dma_regs(unsigned long ioaddr)
79{
80 int i;
81
82 CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
83 for (i = 0; i < 9; i++)
84 pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
85 (DMA_BUS_MODE + i * 4),
86 readl(ioaddr + DMA_BUS_MODE + i * 4));
87 CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
88 DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
89 CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
90 DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
91 return;
92}
93
94/* DMA controller has two counters to track the number of
95 * the receive missed frames. */
96static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
97 unsigned long ioaddr)
98{
99 struct net_device_stats *stats = (struct net_device_stats *)data;
100 u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
101
102 if (unlikely(csr8)) {
103 if (csr8 & DMA_MISSED_FRAME_OVE) {
104 stats->rx_over_errors += 0x800;
105 x->rx_overflow_cntr += 0x800;
106 } else {
107 unsigned int ove_cntr;
108 ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
109 stats->rx_over_errors += ove_cntr;
110 x->rx_overflow_cntr += ove_cntr;
111 }
112
113 if (csr8 & DMA_MISSED_FRAME_OVE_M) {
114 stats->rx_missed_errors += 0xffff;
115 x->rx_missed_cntr += 0xffff;
116 } else {
117 unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
118 stats->rx_missed_errors += miss_f;
119 x->rx_missed_cntr += miss_f;
120 }
121 }
122 return;
123}
124
125struct stmmac_dma_ops dwmac100_dma_ops = {
126 .init = dwmac100_dma_init,
127 .dump_regs = dwmac100_dump_dma_regs,
128 .dma_mode = dwmac100_dma_operation_mode,
129 .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
130 .enable_dma_transmission = dwmac_enable_dma_transmission,
131 .enable_dma_irq = dwmac_enable_dma_irq,
132 .disable_dma_irq = dwmac_disable_dma_irq,
133 .start_tx = dwmac_dma_start_tx,
134 .stop_tx = dwmac_dma_stop_tx,
135 .start_rx = dwmac_dma_start_rx,
136 .stop_rx = dwmac_dma_stop_rx,
137 .dma_interrupt = dwmac_dma_interrupt,
138};
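dwmac100_dma_diagnostic_fr() above folds CSR8 into the stats: when a counter's rollover flag is set the driver credits the counter's full range (0xffff for the 16-bit missed-frame field, 0x800 for the 11-bit FIFO-overflow field) instead of the masked value. A standalone sketch of that decode; the mask names and bit positions below are inferred from the shifts and saturation constants in the code, not quoted from dwmac100.h:

#include <stdint.h>
#include <stdio.h>

#define MISSED_CNTR_MASK  0x0000ffffu  /* assumed: bits 15:0  */
#define MISSED_OVF_FLAG   0x00010000u  /* assumed: bit 16, counter rolled over */
#define FIFO_OVF_CNTR     0x0ffe0000u  /* assumed: bits 27:17 */
#define FIFO_OVF_FLAG     0x10000000u  /* assumed: bit 28, counter rolled over */

static void decode_csr8(uint32_t csr8,
			unsigned long *missed, unsigned long *overflow)
{
	/* When a rollover flag is set the counter saturated, so the
	 * driver credits the full counter range instead of the field. */
	*missed += (csr8 & MISSED_OVF_FLAG) ?
		    0xffff : (csr8 & MISSED_CNTR_MASK);
	*overflow += (csr8 & FIFO_OVF_FLAG) ?
		      0x800 : ((csr8 & FIFO_OVF_CNTR) >> 17);
}

int main(void)
{
	unsigned long missed = 0, overflow = 0;

	decode_csr8(0x10023005u, &missed, &overflow);
	printf("missed=%lu overflow=%lu\n", missed, overflow);
	return 0;
}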
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
index de848d9f6060..7b815a1b7b8c 100644
--- a/drivers/net/stmmac/dwmac_dma.h
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -95,6 +95,7 @@
 #define DMA_STATUS_TU	0x00000004	/* Transmit Buffer Unavailable */
 #define DMA_STATUS_TPS	0x00000002	/* Transmit Process Stopped */
 #define DMA_STATUS_TI	0x00000001	/* Transmit Interrupt */
+#define DMA_CONTROL_FTF		0x00100000 /* Flush transmit FIFO */

 extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
 extern void dwmac_enable_dma_irq(unsigned long ioaddr);
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
index d4adb1eaa447..0a504adb7eb3 100644
--- a/drivers/net/stmmac/dwmac_lib.c
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -227,6 +227,13 @@ int dwmac_dma_interrupt(unsigned long ioaddr,
 	return ret;
 }

+void dwmac_dma_flush_tx_fifo(unsigned long ioaddr)
+{
+	u32 csr6 = readl(ioaddr + DMA_CONTROL);
+	writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+	do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}

 void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
 			 unsigned int high, unsigned int low)
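The new dwmac_dma_flush_tx_fifo() sets the FTF bit and spins until the DMA engine clears it, with no iteration bound. A sketch of a bounded variant in the same style; the loop limit and the -ETIMEDOUT policy are assumptions, not something this patch implements:

static int dma_flush_tx_fifo_bounded(unsigned long ioaddr)
{
	u32 csr6 = readl(ioaddr + DMA_CONTROL);
	int limit = 10000;

	/* Request the flush; the controller clears FTF when done. */
	writel(csr6 | DMA_CONTROL_FTF, ioaddr + DMA_CONTROL);

	while (readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF) {
		if (--limit < 0)
			return -ETIMEDOUT;	/* hardware wedged */
		cpu_relax();
	}
	return 0;
}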
diff --git a/drivers/net/stmmac/enh_desc.c b/drivers/net/stmmac/enh_desc.c
new file mode 100644
index 000000000000..eb5684a1f713
--- /dev/null
+++ b/drivers/net/stmmac/enh_desc.c
@@ -0,0 +1,342 @@
1/*******************************************************************************
2 This contains the functions to handle the enhanced descriptors.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include "common.h"
26
27static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
28 struct dma_desc *p, unsigned long ioaddr)
29{
30 int ret = 0;
31 struct net_device_stats *stats = (struct net_device_stats *)data;
32
33 if (unlikely(p->des01.etx.error_summary)) {
34 CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
35 if (unlikely(p->des01.etx.jabber_timeout)) {
36 CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
37 x->tx_jabber++;
38 }
39
40 if (unlikely(p->des01.etx.frame_flushed)) {
41 CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
42 x->tx_frame_flushed++;
43 dwmac_dma_flush_tx_fifo(ioaddr);
44 }
45
46 if (unlikely(p->des01.etx.loss_carrier)) {
47 CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
48 x->tx_losscarrier++;
49 stats->tx_carrier_errors++;
50 }
51 if (unlikely(p->des01.etx.no_carrier)) {
52 CHIP_DBG(KERN_ERR "\tno_carrier error\n");
53 x->tx_carrier++;
54 stats->tx_carrier_errors++;
55 }
56 if (unlikely(p->des01.etx.late_collision)) {
57 CHIP_DBG(KERN_ERR "\tlate_collision error\n");
58 stats->collisions += p->des01.etx.collision_count;
59 }
60 if (unlikely(p->des01.etx.excessive_collisions)) {
61 CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
62 stats->collisions += p->des01.etx.collision_count;
63 }
64 if (unlikely(p->des01.etx.excessive_deferral)) {
65 CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
66 x->tx_deferred++;
67 }
68
69 if (unlikely(p->des01.etx.underflow_error)) {
70 CHIP_DBG(KERN_ERR "\tunderflow error\n");
71 dwmac_dma_flush_tx_fifo(ioaddr);
72 x->tx_underflow++;
73 }
74
75 if (unlikely(p->des01.etx.ip_header_error)) {
76 CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
77 x->tx_ip_header_error++;
78 }
79
80 if (unlikely(p->des01.etx.payload_error)) {
81 CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
82 x->tx_payload_error++;
83 dwmac_dma_flush_tx_fifo(ioaddr);
84 }
85
86 ret = -1;
87 }
88
89 if (unlikely(p->des01.etx.deferred)) {
90 CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
91 x->tx_deferred++;
92 }
93#ifdef STMMAC_VLAN_TAG_USED
94 if (p->des01.etx.vlan_frame) {
95 CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
96 x->tx_vlan++;
97 }
98#endif
99
100 return ret;
101}
102
103static int enh_desc_get_tx_len(struct dma_desc *p)
104{
105 return p->des01.etx.buffer1_size;
106}
107
108static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
109{
110 int ret = good_frame;
111 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
112
113 /* bits 5 7 0 | Frame status
114 * ----------------------------------------------------------
115	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
116	 *      1 0 0 | IPv4/6 No CSUM errors.
117	 *      1 0 1 | IPv4/6 CSUM PAYLOAD error
118	 *      1 1 0 | IPv4/6 CSUM IP HR error
119	 *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
120 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD
121 * 0 1 1 | COE bypassed.. no IPv4/6 frame
122 * 0 1 0 | Reserved.
123 */
124 if (status == 0x0) {
125 CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
126 ret = good_frame;
127 } else if (status == 0x4) {
128 CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errorS.\n");
129 ret = good_frame;
130 } else if (status == 0x5) {
131 CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
132 ret = csum_none;
133 } else if (status == 0x6) {
134 CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
135 ret = csum_none;
136 } else if (status == 0x7) {
137 CHIP_DBG(KERN_ERR
138 "RX Des0 status: IPv4/6 Header and Payload Error.\n");
139 ret = csum_none;
140 } else if (status == 0x1) {
141 CHIP_DBG(KERN_ERR
142 "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
143 ret = discard_frame;
144 } else if (status == 0x3) {
145 CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
146 ret = discard_frame;
147 }
148 return ret;
149}
150
151static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
152 struct dma_desc *p)
153{
154 int ret = good_frame;
155 struct net_device_stats *stats = (struct net_device_stats *)data;
156
157 if (unlikely(p->des01.erx.error_summary)) {
158 CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
159 p->des01.erx);
160 if (unlikely(p->des01.erx.descriptor_error)) {
161 CHIP_DBG(KERN_ERR "\tdescriptor error\n");
162 x->rx_desc++;
163 stats->rx_length_errors++;
164 }
165 if (unlikely(p->des01.erx.overflow_error)) {
166 CHIP_DBG(KERN_ERR "\toverflow error\n");
167 x->rx_gmac_overflow++;
168 }
169
170 if (unlikely(p->des01.erx.ipc_csum_error))
171 CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");
172
173 if (unlikely(p->des01.erx.late_collision)) {
174 CHIP_DBG(KERN_ERR "\tlate_collision error\n");
175			stats->collisions++;
177 }
178 if (unlikely(p->des01.erx.receive_watchdog)) {
179 CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
180 x->rx_watchdog++;
181 }
182 if (unlikely(p->des01.erx.error_gmii)) {
183 CHIP_DBG(KERN_ERR "\tReceive Error\n");
184 x->rx_mii++;
185 }
186 if (unlikely(p->des01.erx.crc_error)) {
187 CHIP_DBG(KERN_ERR "\tCRC error\n");
188 x->rx_crc++;
189 stats->rx_crc_errors++;
190 }
191 ret = discard_frame;
192 }
193
194 /* After a payload csum error, the ES bit is set.
195 * It doesn't match with the information reported into the databook.
196 * At any rate, we need to understand if the CSUM hw computation is ok
197 * and report this info to the upper layers. */
198 ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
199 p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
200
201 if (unlikely(p->des01.erx.dribbling)) {
202 CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
203 ret = discard_frame;
204 }
205 if (unlikely(p->des01.erx.sa_filter_fail)) {
206 CHIP_DBG(KERN_ERR "GMAC RX : Source Address filter fail\n");
207 x->sa_rx_filter_fail++;
208 ret = discard_frame;
209 }
210 if (unlikely(p->des01.erx.da_filter_fail)) {
211 CHIP_DBG(KERN_ERR "GMAC RX : Dest Address filter fail\n");
212 x->da_rx_filter_fail++;
213 ret = discard_frame;
214 }
215 if (unlikely(p->des01.erx.length_error)) {
216 CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
217 x->rx_length++;
218 ret = discard_frame;
219 }
220#ifdef STMMAC_VLAN_TAG_USED
221 if (p->des01.erx.vlan_tag) {
222 CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
223 x->rx_vlan++;
224 }
225#endif
226 return ret;
227}
228
229static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
230 int disable_rx_ic)
231{
232 int i;
233 for (i = 0; i < ring_size; i++) {
234 p->des01.erx.own = 1;
235 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;
236 /* To support jumbo frames */
237 p->des01.erx.buffer2_size = BUF_SIZE_8KiB - 1;
238 if (i == ring_size - 1)
239 p->des01.erx.end_ring = 1;
240 if (disable_rx_ic)
241 p->des01.erx.disable_ic = 1;
242 p++;
243 }
244 return;
245}
246
247static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
248{
249 int i;
250
251 for (i = 0; i < ring_size; i++) {
252 p->des01.etx.own = 0;
253 if (i == ring_size - 1)
254 p->des01.etx.end_ring = 1;
255 p++;
256 }
257
258 return;
259}
260
261static int enh_desc_get_tx_owner(struct dma_desc *p)
262{
263 return p->des01.etx.own;
264}
265
266static int enh_desc_get_rx_owner(struct dma_desc *p)
267{
268 return p->des01.erx.own;
269}
270
271static void enh_desc_set_tx_owner(struct dma_desc *p)
272{
273 p->des01.etx.own = 1;
274}
275
276static void enh_desc_set_rx_owner(struct dma_desc *p)
277{
278 p->des01.erx.own = 1;
279}
280
281static int enh_desc_get_tx_ls(struct dma_desc *p)
282{
283 return p->des01.etx.last_segment;
284}
285
286static void enh_desc_release_tx_desc(struct dma_desc *p)
287{
288 int ter = p->des01.etx.end_ring;
289
290 memset(p, 0, sizeof(struct dma_desc));
291 p->des01.etx.end_ring = ter;
292
293 return;
294}
295
296static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
297 int csum_flag)
298{
299 p->des01.etx.first_segment = is_fs;
300 if (unlikely(len > BUF_SIZE_4KiB)) {
301 p->des01.etx.buffer1_size = BUF_SIZE_4KiB;
302 p->des01.etx.buffer2_size = len - BUF_SIZE_4KiB;
303 } else {
304 p->des01.etx.buffer1_size = len;
305 }
306 if (likely(csum_flag))
307 p->des01.etx.checksum_insertion = cic_full;
308}
309
310static void enh_desc_clear_tx_ic(struct dma_desc *p)
311{
312 p->des01.etx.interrupt = 0;
313}
314
315static void enh_desc_close_tx_desc(struct dma_desc *p)
316{
317 p->des01.etx.last_segment = 1;
318 p->des01.etx.interrupt = 1;
319}
320
321static int enh_desc_get_rx_frame_len(struct dma_desc *p)
322{
323 return p->des01.erx.frame_length;
324}
325
326struct stmmac_desc_ops enh_desc_ops = {
327 .tx_status = enh_desc_get_tx_status,
328 .rx_status = enh_desc_get_rx_status,
329 .get_tx_len = enh_desc_get_tx_len,
330 .init_rx_desc = enh_desc_init_rx_desc,
331 .init_tx_desc = enh_desc_init_tx_desc,
332 .get_tx_owner = enh_desc_get_tx_owner,
333 .get_rx_owner = enh_desc_get_rx_owner,
334 .release_tx_desc = enh_desc_release_tx_desc,
335 .prepare_tx_desc = enh_desc_prepare_tx_desc,
336 .clear_tx_ic = enh_desc_clear_tx_ic,
337 .close_tx_desc = enh_desc_close_tx_desc,
338 .get_tx_ls = enh_desc_get_tx_ls,
339 .set_tx_owner = enh_desc_set_tx_owner,
340 .set_rx_owner = enh_desc_set_rx_owner,
341 .get_rx_frame_len = enh_desc_get_rx_frame_len,
342};
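enh_desc_coe_rdes0() above maps the three RDES0 bits (type<<2 | ipc_err<<1 | payload_err) onto the frame disposition. A standalone sketch of the same decode as a switch, handy for checking the table; the enum names are stand-ins for the driver's good_frame/csum_none/discard_frame values:

#include <stdio.h>

enum rx_frame_status { GOOD_FRAME, CSUM_NONE, DISCARD_FRAME };

static enum rx_frame_status coe_status(int type, int ipc_err, int payload_err)
{
	switch ((type << 2 | ipc_err << 1 | payload_err) & 0x7) {
	case 0x0:		/* pre-IPv4/6 (802.3) frame, nothing checked */
	case 0x4:		/* IPv4/6, header and payload checksums OK */
		return GOOD_FRAME;
	case 0x5:		/* payload checksum error */
	case 0x6:		/* header checksum error */
	case 0x7:		/* both bad: let the stack re-verify */
		return CSUM_NONE;
	case 0x1:		/* unsupported IP payload */
	case 0x3:		/* COE bypassed: not an IPv4/6 frame */
		return DISCARD_FRAME;
	default:		/* 0x2 is reserved; the driver leaves it at
				 * its good_frame default */
		return GOOD_FRAME;
	}
}

int main(void)
{
	int t, i, p;

	for (t = 0; t < 2; t++)
		for (i = 0; i < 2; i++)
			for (p = 0; p < 2; p++)
				printf("type=%d ipc=%d payload=%d -> %d\n",
				       t, i, p, coe_status(t, i, p));
	return 0;
}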
diff --git a/drivers/net/stmmac/norm_desc.c b/drivers/net/stmmac/norm_desc.c
new file mode 100644
index 000000000000..ecfcc001a04a
--- /dev/null
+++ b/drivers/net/stmmac/norm_desc.c
@@ -0,0 +1,240 @@
1/*******************************************************************************
2 This contains the functions to handle the normal descriptors.
3
4 Copyright (C) 2007-2009 STMicroelectronics Ltd
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
23*******************************************************************************/
24
25#include "common.h"
26
27static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
28 struct dma_desc *p, unsigned long ioaddr)
29{
30 int ret = 0;
31 struct net_device_stats *stats = (struct net_device_stats *)data;
32
33 if (unlikely(p->des01.tx.error_summary)) {
34 if (unlikely(p->des01.tx.underflow_error)) {
35 x->tx_underflow++;
36 stats->tx_fifo_errors++;
37 }
38 if (unlikely(p->des01.tx.no_carrier)) {
39 x->tx_carrier++;
40 stats->tx_carrier_errors++;
41 }
42 if (unlikely(p->des01.tx.loss_carrier)) {
43 x->tx_losscarrier++;
44 stats->tx_carrier_errors++;
45 }
46 if (unlikely((p->des01.tx.excessive_deferral) ||
47 (p->des01.tx.excessive_collisions) ||
48 (p->des01.tx.late_collision)))
49 stats->collisions += p->des01.tx.collision_count;
50 ret = -1;
51 }
52 if (unlikely(p->des01.tx.heartbeat_fail)) {
53 x->tx_heartbeat++;
54 stats->tx_heartbeat_errors++;
55 ret = -1;
56 }
57 if (unlikely(p->des01.tx.deferred))
58 x->tx_deferred++;
59
60 return ret;
61}
62
63static int ndesc_get_tx_len(struct dma_desc *p)
64{
65 return p->des01.tx.buffer1_size;
66}
67
68/* This function verifies if each incoming frame has some errors
69 * and, if required, updates the multicast statistics.
70 * In case of success, it returns csum_none because the device
71 * is not able to compute the csum in HW. */
72static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
73 struct dma_desc *p)
74{
75 int ret = csum_none;
76 struct net_device_stats *stats = (struct net_device_stats *)data;
77
78 if (unlikely(p->des01.rx.last_descriptor == 0)) {
79 pr_warning("ndesc Error: Oversized Ethernet "
80 "frame spanned multiple buffers\n");
81 stats->rx_length_errors++;
82 return discard_frame;
83 }
84
85 if (unlikely(p->des01.rx.error_summary)) {
86 if (unlikely(p->des01.rx.descriptor_error))
87 x->rx_desc++;
88 if (unlikely(p->des01.rx.partial_frame_error))
89 x->rx_partial++;
90 if (unlikely(p->des01.rx.run_frame))
91 x->rx_runt++;
92 if (unlikely(p->des01.rx.frame_too_long))
93 x->rx_toolong++;
94 if (unlikely(p->des01.rx.collision)) {
95 x->rx_collision++;
96 stats->collisions++;
97 }
98 if (unlikely(p->des01.rx.crc_error)) {
99 x->rx_crc++;
100 stats->rx_crc_errors++;
101 }
102 ret = discard_frame;
103 }
104 if (unlikely(p->des01.rx.dribbling))
105 ret = discard_frame;
106
107 if (unlikely(p->des01.rx.length_error)) {
108 x->rx_length++;
109 ret = discard_frame;
110 }
111 if (unlikely(p->des01.rx.mii_error)) {
112 x->rx_mii++;
113 ret = discard_frame;
114 }
115 if (p->des01.rx.multicast_frame) {
116 x->rx_multicast++;
117 stats->multicast++;
118 }
119 return ret;
120}
121
122static void ndesc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
123 int disable_rx_ic)
124{
125 int i;
126 for (i = 0; i < ring_size; i++) {
127 p->des01.rx.own = 1;
128 p->des01.rx.buffer1_size = BUF_SIZE_2KiB - 1;
129 if (i == ring_size - 1)
130 p->des01.rx.end_ring = 1;
131 if (disable_rx_ic)
132 p->des01.rx.disable_ic = 1;
133 p++;
134 }
135 return;
136}
137
138static void ndesc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
139{
140 int i;
141 for (i = 0; i < ring_size; i++) {
142 p->des01.tx.own = 0;
143 if (i == ring_size - 1)
144 p->des01.tx.end_ring = 1;
145 p++;
146 }
147 return;
148}
149
150static int ndesc_get_tx_owner(struct dma_desc *p)
151{
152 return p->des01.tx.own;
153}
154
155static int ndesc_get_rx_owner(struct dma_desc *p)
156{
157 return p->des01.rx.own;
158}
159
160static void ndesc_set_tx_owner(struct dma_desc *p)
161{
162 p->des01.tx.own = 1;
163}
164
165static void ndesc_set_rx_owner(struct dma_desc *p)
166{
167 p->des01.rx.own = 1;
168}
169
170static int ndesc_get_tx_ls(struct dma_desc *p)
171{
172 return p->des01.tx.last_segment;
173}
174
175static void ndesc_release_tx_desc(struct dma_desc *p)
176{
177 int ter = p->des01.tx.end_ring;
178
179 /* clean field used within the xmit */
180 p->des01.tx.first_segment = 0;
181 p->des01.tx.last_segment = 0;
182 p->des01.tx.buffer1_size = 0;
183
184 /* clean status reported */
185 p->des01.tx.error_summary = 0;
186 p->des01.tx.underflow_error = 0;
187 p->des01.tx.no_carrier = 0;
188 p->des01.tx.loss_carrier = 0;
189 p->des01.tx.excessive_deferral = 0;
190 p->des01.tx.excessive_collisions = 0;
191 p->des01.tx.late_collision = 0;
192 p->des01.tx.heartbeat_fail = 0;
193 p->des01.tx.deferred = 0;
194
195 /* set termination field */
196 p->des01.tx.end_ring = ter;
197
198 return;
199}
200
201static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
202 int csum_flag)
203{
204 p->des01.tx.first_segment = is_fs;
205 p->des01.tx.buffer1_size = len;
206}
207
208static void ndesc_clear_tx_ic(struct dma_desc *p)
209{
210 p->des01.tx.interrupt = 0;
211}
212
213static void ndesc_close_tx_desc(struct dma_desc *p)
214{
215 p->des01.tx.last_segment = 1;
216 p->des01.tx.interrupt = 1;
217}
218
219static int ndesc_get_rx_frame_len(struct dma_desc *p)
220{
221 return p->des01.rx.frame_length;
222}
223
224struct stmmac_desc_ops ndesc_ops = {
225 .tx_status = ndesc_get_tx_status,
226 .rx_status = ndesc_get_rx_status,
227 .get_tx_len = ndesc_get_tx_len,
228 .init_rx_desc = ndesc_init_rx_desc,
229 .init_tx_desc = ndesc_init_tx_desc,
230 .get_tx_owner = ndesc_get_tx_owner,
231 .get_rx_owner = ndesc_get_rx_owner,
232 .release_tx_desc = ndesc_release_tx_desc,
233 .prepare_tx_desc = ndesc_prepare_tx_desc,
234 .clear_tx_ic = ndesc_clear_tx_ic,
235 .close_tx_desc = ndesc_close_tx_desc,
236 .get_tx_ls = ndesc_get_tx_ls,
237 .set_tx_owner = ndesc_set_tx_owner,
238 .set_rx_owner = ndesc_set_rx_owner,
239 .get_rx_frame_len = ndesc_get_rx_frame_len,
240};
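Both descriptor flavours are exported as stmmac_desc_ops tables, so hot-path code can stay layout-agnostic and call through function pointers. A hedged sketch of how a TX-completion step might dispatch through the table; the helper name and the priv->hw->desc wiring follow the mac_device_info hookup shown later in this patch, while the surrounding logic and field names are illustrative:

static int sketch_tx_clean_one(struct stmmac_priv *priv, struct dma_desc *p)
{
	struct stmmac_desc_ops *desc = priv->hw->desc;

	if (desc->get_tx_owner(p))
		return -EBUSY;	/* still owned by the DMA engine */

	if (desc->get_tx_ls(p))	/* last segment: status is valid */
		desc->tx_status(&priv->dev->stats, &priv->xstats,
				p, priv->dev->base_addr);

	desc->release_tx_desc(p);	/* ready for the next xmit */
	return 0;
}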
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index ba35e6943cf4..ebebc644b1b8 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,14 +20,9 @@
 	Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/

-#define DRV_MODULE_VERSION	"Jan_2010"
+#define DRV_MODULE_VERSION	"Apr_2010"
 #include <linux/stmmac.h>

-#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
-#define STMMAC_VLAN_TAG_USED
-#include <linux/if_vlan.h>
-#endif
-
 #include "common.h"
 #ifdef CONFIG_STMMAC_TIMER
 #include "stmmac_timer.h"
@@ -93,6 +88,7 @@ struct stmmac_priv {
 #ifdef STMMAC_VLAN_TAG_USED
 	struct vlan_group *vlgrp;
 #endif
+	int enh_desc;
 };

 #ifdef CONFIG_STM_DRIVERS
@@ -120,3 +116,5 @@ static inline int stmmac_claim_resource(struct platform_device *pdev)
 extern int stmmac_mdio_unregister(struct net_device *ndev);
 extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
+extern struct stmmac_desc_ops enh_desc_ops;
+extern struct stmmac_desc_ops ndesc_ops;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 4111a85ec80e..7ac6ddea989e 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -837,7 +837,7 @@ static int stmmac_open(struct net_device *dev)
 #ifdef CONFIG_STMMAC_TIMER
 	priv->tm = kzalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
 	if (unlikely(priv->tm == NULL)) {
-		pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
+		pr_err("%s: ERROR: timer memory alloc failed\n", __func__);
 		return -ENOMEM;
 	}
 	priv->tm->freq = tmrate;
@@ -1280,7 +1280,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)

 			priv->dev->stats.rx_packets++;
 			priv->dev->stats.rx_bytes += frame_len;
-			priv->dev->last_rx = jiffies;
 		}
 		entry = next_entry;
 		p = p_next;	/* use prefetched values */
@@ -1587,6 +1586,12 @@ static int stmmac_mac_device_setup(struct net_device *dev)
 	else
 		device = dwmac100_setup(ioaddr);

 	if (!device)
 		return -ENOMEM;

+	if (priv->enh_desc) {
+		device->desc = &enh_desc_ops;
+		pr_info("\tEnhanced descriptor structure\n");
+	} else
+		device->desc = &ndesc_ops;
+
@@ -1727,6 +1732,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
 	priv->bus_id = plat_dat->bus_id;
 	priv->pbl = plat_dat->pbl;	/* TLI */
 	priv->is_gmac = plat_dat->has_gmac;	/* GMAC is on board */
+	priv->enh_desc = plat_dat->enh_desc;

 	platform_set_drvdata(pdev, ndev);

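Boards opt in through the new enh_desc flag carried in the platform data. A hedged board-file sketch; the other field values are placeholders, not taken from a real board:

static struct plat_stmmacenet_data sketch_stmmac_data = {
	.bus_id   = 0,
	.pbl      = 32,		/* DMA programmable burst length */
	.has_gmac = 1,		/* GMAC core, so it may also... */
	.enh_desc = 1,		/* ...use the enhanced descriptor layout */
};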
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index 8b28c89a9a77..31ab4ab0796f 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -412,7 +412,7 @@ static int init586(struct net_device *dev)
 	volatile struct iasetup_cmd_struct *ias_cmd;
 	volatile struct tdr_cmd_struct *tdr_cmd;
 	volatile struct mcsetup_cmd_struct *mc_cmd;
-	struct dev_mc_list *dmi;
+	struct netdev_hw_addr *ha;
 	int num_addrs=netdev_mc_count(dev);

 	ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
@@ -536,9 +536,9 @@ static int init586(struct net_device *dev)
 	mc_cmd->mc_cnt = swab16(num_addrs * 6);

 	i = 0;
-	netdev_for_each_mc_addr(dmi, dev)
+	netdev_for_each_mc_addr(ha, dev)
 		memcpy((char *) mc_cmd->mc_list[i++],
-		       dmi->dmi_addr, ETH_ALEN);
+		       ha->addr, ETH_ALEN);

 	p->scb->cbl_offset = make16(mc_cmd);
 	p->scb->cmd_cuc = CUC_START;
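This and the remaining driver hunks below are the same mechanical conversion: the multicast walk moves from struct dev_mc_list (address in dmi->dmi_addr) to struct netdev_hw_addr (address in ha->addr), with netdev_for_each_mc_addr() itself unchanged at each call site. A side-by-side sketch of the two idioms; load_filter() is a stand-in for each driver's real consumer:

extern void load_filter(const unsigned char *addr);

static void walk_mc_old(struct net_device *dev)	/* pre-patch idiom */
{
	struct dev_mc_list *dmi;

	netdev_for_each_mc_addr(dmi, dev)
		load_filter(dmi->dmi_addr);	/* u8[ETH_ALEN] */
}

static void walk_mc_new(struct net_device *dev)	/* post-patch idiom */
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		load_filter(ha->addr);		/* same bytes, new container */
}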
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index ed7865a0b5b2..52913155ce47 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -999,7 +999,7 @@ static void bigmac_set_multicast(struct net_device *dev)
 {
 	struct bigmac *bp = netdev_priv(dev);
 	void __iomem *bregs = bp->bregs;
-	struct dev_mc_list *dmi;
+	struct netdev_hw_addr *ha;
 	char *addrs;
 	int i;
 	u32 tmp, crc;
@@ -1028,8 +1028,8 @@ static void bigmac_set_multicast(struct net_device *dev)
 		for (i = 0; i < 4; i++)
 			hash_table[i] = 0;

-		netdev_for_each_mc_addr(dmi, dev) {
-			addrs = dmi->dmi_addr;
+		netdev_for_each_mc_addr(ha, dev) {
+			addrs = ha->addr;

 			if (!(*addrs & 1))
 				continue;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 8249a394a4e1..da45f01279c4 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1522,13 +1522,13 @@ static void set_rx_mode(struct net_device *dev)
 		memset(mc_filter, 0xff, sizeof(mc_filter));
 		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
 	} else if (!netdev_mc_empty(dev)) {
-		struct dev_mc_list *mclist;
+		struct netdev_hw_addr *ha;
 		int bit;
 		int index;
 		int crc;
 		memset (mc_filter, 0, sizeof (mc_filter));
-		netdev_for_each_mc_addr(mclist, dev) {
-			crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
+		netdev_for_each_mc_addr(ha, dev) {
+			crc = ether_crc_le(ETH_ALEN, ha->addr);
 			for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
 				if (crc & 0x80000000) index |= 1 << bit;
 			mc_filter[index/16] |= (1 << (index % 16));
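sundance builds its 6-bit filter index differently from the stmmac code earlier: it takes the little-endian CRC and bit-reverses the top six bits into the index. A standalone sketch, with a CRC routine mirroring the semantics of the kernel's ether_crc_le() (the sample address is illustrative):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the kernel's ether_crc_le(): CRC-32 in reflected form,
 * polynomial 0xedb88320. */
static uint32_t ether_crc_le_sketch(int len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (len-- > 0) {
		int bit;

		crc ^= *data++;
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = ether_crc_le_sketch(6, addr);
	int index = 0, bit;

	/* Walk the top six CRC bits MSB-first, reversing them into the
	 * 6-bit index, then set one bit in the four 16-bit filter words. */
	for (bit = 0; bit < 6; bit++, crc <<= 1)
		if (crc & 0x80000000)
			index |= 1 << bit;
	printf("mc_filter word %d, bit %d\n", index / 16, index % 16);
	return 0;
}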
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index e6880f1c4e8c..2b78e97ea9c0 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1846,12 +1846,12 @@ static u32 gem_setup_multicast(struct gem *gp)
 	} else {
 		u16 hash_table[16];
 		u32 crc;
-		struct dev_mc_list *dmi;
+		struct netdev_hw_addr *ha;
 		int i;

 		memset(hash_table, 0, sizeof(hash_table));
-		netdev_for_each_mc_addr(dmi, gp->dev) {
-			char *addrs = dmi->dmi_addr;
+		netdev_for_each_mc_addr(ha, gp->dev) {
+			char *addrs = ha->addr;

 			if (!(*addrs & 1))
 				continue;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index b17dbb11bd67..20deb14e98ff 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1523,13 +1523,13 @@ static int happy_meal_init(struct happy_meal *hp)
 		hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
 	} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
 		u16 hash_table[4];
-		struct dev_mc_list *dmi;
+		struct netdev_hw_addr *ha;
 		char *addrs;
 		u32 crc;

 		memset(hash_table, 0, sizeof(hash_table));
-		netdev_for_each_mc_addr(dmi, hp->dev) {
-			addrs = dmi->dmi_addr;
+		netdev_for_each_mc_addr(ha, hp->dev) {
+			addrs = ha->addr;

 			if (!(*addrs & 1))
 				continue;
@@ -2362,7 +2362,7 @@ static void happy_meal_set_multicast(struct net_device *dev)
 {
 	struct happy_meal *hp = netdev_priv(dev);
 	void __iomem *bregs = hp->bigmacregs;
-	struct dev_mc_list *dmi;
+	struct netdev_hw_addr *ha;
 	char *addrs;
 	u32 crc;

@@ -2380,8 +2380,8 @@ static void happy_meal_set_multicast(struct net_device *dev)
 		u16 hash_table[4];

 		memset(hash_table, 0, sizeof(hash_table));
-		netdev_for_each_mc_addr(dmi, dev) {
-			addrs = dmi->dmi_addr;
+		netdev_for_each_mc_addr(ha, dev) {
+			addrs = ha->addr;

 			if (!(*addrs & 1))
 				continue;
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 0c21653ff9f9..c7748b73fa09 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1170,7 +1170,7 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void lance_load_multicast(struct net_device *dev)
 {
 	struct lance_private *lp = netdev_priv(dev);
-	struct dev_mc_list *dmi;
+	struct netdev_hw_addr *ha;
 	char *addrs;
 	u32 crc;
 	u32 val;
@@ -1195,8 +1195,8 @@ static void lance_load_multicast(struct net_device *dev)
 		return;

 	/* Add addresses */
-	netdev_for_each_mc_addr(dmi, dev) {
-		addrs = dmi->dmi_addr;
+	netdev_for_each_mc_addr(ha, dev) {
+		addrs = ha->addr;

 		/* multicast address? */
 		if (!(*addrs & 1))
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index be637dce944c..239f09772191 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -627,7 +627,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void qe_set_multicast(struct net_device *dev)
 {
 	struct sunqe *qep = netdev_priv(dev);
-	struct dev_mc_list *dmi;
+	struct netdev_hw_addr *ha;
 	u8 new_mconfig = qep->mconfig;
 	char *addrs;
 	int i;
@@ -651,8 +651,8 @@ static void qe_set_multicast(struct net_device *dev)
 		u8 *hbytes = (unsigned char *) &hash_table[0];

 		memset(hash_table, 0, sizeof(hash_table));
-		netdev_for_each_mc_addr(dmi, dev) {
-			addrs = dmi->dmi_addr;
+		netdev_for_each_mc_addr(ha, dev) {
+			addrs = ha->addr;

 			if (!(*addrs & 1))
 				continue;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index 6b1b7cea7f6b..6cf8b06be5cd 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -763,12 +763,12 @@ static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)

 static void __update_mc_list(struct vnet *vp, struct net_device *dev)
 {
-	struct dev_addr_list *p;
+	struct netdev_hw_addr *ha;

-	netdev_for_each_mc_addr(p, dev) {
+	netdev_for_each_mc_addr(ha, dev) {
 		struct vnet_mcast_entry *m;

-		m = __vnet_mc_find(vp, p->dmi_addr);
+		m = __vnet_mc_find(vp, ha->addr);
 		if (m) {
 			m->hit = 1;
 			continue;
@@ -778,7 +778,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev)
 		m = kzalloc(sizeof(*m), GFP_ATOMIC);
 		if (!m)
 			continue;
-		memcpy(m->addr, p->dmi_addr, ETH_ALEN);
+		memcpy(m->addr, ha->addr, ETH_ALEN);
 		m->hit = 1;

 		m->next = vp->mcast_list;
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index 49bd84c0d583..36149ddace46 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -1954,16 +1954,16 @@ tc35815_set_multicast_list(struct net_device *dev)
 		/* Disable promiscuous mode, use normal mode. */
 		tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
 	} else if (!netdev_mc_empty(dev)) {
-		struct dev_mc_list *cur_addr;
+		struct netdev_hw_addr *ha;
 		int i;
 		int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);

 		tc_writel(0, &tr->CAM_Ctl);
 		/* Walk the address list, and load the filter */
 		i = 0;
-		netdev_for_each_mc_addr(cur_addr, dev) {
+		netdev_for_each_mc_addr(ha, dev) {
 			/* entry 0,1 is reserved. */
-			tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr);
+			tc35815_set_cam_entry(dev, i + 2, ha->addr);
 			ena_bits |= CAM_Ena_Bit(i + 2);
 			i++;
 		}
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index f5493092521a..e29f495c6a2b 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -808,7 +808,7 @@ static void bdx_setmulti(struct net_device *ndev)
 			WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
 	} else if (!netdev_mc_empty(ndev)) {
 		u8 hash;
-		struct dev_mc_list *mclist;
+		struct netdev_hw_addr *ha;
 		u32 reg, val;

 		/* set IMF to deny all multicast frames */
@@ -825,10 +825,10 @@ static void bdx_setmulti(struct net_device *ndev)
 		 * into RX_MAC_MCST regs. we skip this phase now and accept ALL
 		 * multicast frames through IMF */
 		/* accept the rest of addresses through IMF */
-		netdev_for_each_mc_addr(mclist, ndev) {
+		netdev_for_each_mc_addr(ha, ndev) {
 			hash = 0;
 			for (i = 0; i < ETH_ALEN; i++)
-				hash ^= mclist->dmi_addr[i];
+				hash ^= ha->addr[i];
 			reg = regRX_MCST_HASH0 + ((hash >> 5) << 2);
 			val = READ_REG(priv, reg);
 			val |= (1 << (hash % 32));
@@ -1303,7 +1303,6 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
 		priv->net_stats.rx_bytes += len;

 		skb_put(skb, len);
-		skb->dev = priv->ndev;
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 		skb->protocol = eth_type_trans(skb, priv->ndev);

@@ -1509,7 +1508,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
 	int nr_frags = skb_shinfo(skb)->nr_frags;
 	int i;

-	db->wptr->len = skb->len - skb->data_len;
+	db->wptr->len = skb_headlen(skb);
 	db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
 					    db->wptr->len, PCI_DMA_TODEVICE);
 	pbl->len = CPU_CHIP_SWAP32(db->wptr->len);
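The tx-mapping change above is behaviour-preserving: skb->len counts all bytes and skb->data_len only the paged (non-linear) part, so their difference is exactly the linear head that pci_map_single() can address, which is what skb_headlen() returns. The identity, as a sketch rather than the kernel helper itself:

static inline unsigned int headlen_sketch(const struct sk_buff *skb)
{
	return skb->len - skb->data_len;	/* == skb_headlen(skb) */
}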
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 22cf1c446de3..7724d7e4ebd6 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,8 +67,8 @@
67#include "tg3.h" 67#include "tg3.h"
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define DRV_MODULE_VERSION "3.108" 70#define DRV_MODULE_VERSION "3.110"
71#define DRV_MODULE_RELDATE "February 17, 2010" 71#define DRV_MODULE_RELDATE "April 9, 2010"
72 72
73#define TG3_DEF_MAC_MODE 0 73#define TG3_DEF_MAC_MODE 0
74#define TG3_DEF_RX_MODE 0 74#define TG3_DEF_RX_MODE 0
@@ -101,7 +101,7 @@
101#define TG3_DEF_RX_RING_PENDING 200 101#define TG3_DEF_RX_RING_PENDING 200
102#define TG3_RX_JUMBO_RING_SIZE 256 102#define TG3_RX_JUMBO_RING_SIZE 256
103#define TG3_DEF_RX_JUMBO_RING_PENDING 100 103#define TG3_DEF_RX_JUMBO_RING_PENDING 100
104#define TG3_RSS_INDIR_TBL_SIZE 128 104#define TG3_RSS_INDIR_TBL_SIZE 128
105 105
106/* Do not place this n-ring entries value into the tp struct itself, 106/* Do not place this n-ring entries value into the tp struct itself,
107 * we really want to expose these constants to GCC so that modulo et 107 * we really want to expose these constants to GCC so that modulo et
@@ -126,6 +126,9 @@
126 TG3_TX_RING_SIZE) 126 TG3_TX_RING_SIZE)
127#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1)) 127#define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
128 128
129#define TG3_RX_DMA_ALIGN 16
130#define TG3_RX_HEADROOM ALIGN(VLAN_HLEN, TG3_RX_DMA_ALIGN)
131
129#define TG3_DMA_BYTE_ENAB 64 132#define TG3_DMA_BYTE_ENAB 64
130 133
131#define TG3_RX_STD_DMA_SZ 1536 134#define TG3_RX_STD_DMA_SZ 1536
@@ -142,6 +145,26 @@
 #define TG3_RX_JMB_BUFF_RING_SIZE \
 	(sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)

+#define TG3_RSS_MIN_NUM_MSIX_VECS	2
+
+/* Due to a hardware bug, the 5701 can only DMA to memory addresses
+ * that are at least dword aligned when used in PCIX mode.  The driver
+ * works around this bug by double copying the packet.  This workaround
+ * is built into the normal double copy length check for efficiency.
+ *
+ * However, the double copy is only necessary on those architectures
+ * where unaligned memory accesses are inefficient.  For those architectures
+ * where unaligned memory accesses incur little penalty, we can reintegrate
+ * the 5701 in the normal rx path.  Doing so saves a device structure
+ * dereference by hardcoding the double copy threshold in place.
+ */
+#define TG3_RX_COPY_THRESHOLD		256
+#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+	#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
+#else
+	#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
+#endif
+
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)	((tnapi)->tx_pending / 4)

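A minimal sketch of the copy-vs-remap decision the new threshold macro feeds; the function and its use are illustrative, not tg3's actual receive routine:

static bool rx_should_copy(struct tg3 *tp, unsigned int len)
{
	/* On strict-alignment architectures the macro reads the per-device
	 * threshold (which, per the comment above, folds in the 5701 PCIX
	 * workaround); elsewhere it collapses to the constant 256 so the
	 * compare needs no device-structure load. */
	return len <= TG3_RX_COPY_THRESH(tp);
}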
@@ -152,6 +175,8 @@

 #define TG3_NUM_TEST		6

+#define TG3_FW_UPDATE_TIMEOUT_SEC	5
+
 #define FIRMWARE_TG3		"tigon/tg3.bin"
 #define FIRMWARE_TG3TSO		"tigon/tg3_tso.bin"
 #define FIRMWARE_TG3TSO5	"tigon/tg3_tso5.bin"
@@ -167,8 +192,6 @@ MODULE_FIRMWARE(FIRMWARE_TG3);
 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

-#define TG3_RSS_MIN_NUM_MSIX_VECS	2
-
 static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
 module_param(tg3_debug, int, 0);
 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
@@ -360,7 +383,7 @@ static void tg3_write32(struct tg3 *tp, u32 off, u32 val)

 static u32 tg3_read32(struct tg3 *tp, u32 off)
 {
-	return (readl(tp->regs + off));
+	return readl(tp->regs + off);
 }

 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
@@ -370,7 +393,7 @@ static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)

 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
 {
-	return (readl(tp->aperegs + off));
+	return readl(tp->aperegs + off);
 }

 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
@@ -488,7 +511,7 @@ static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)

 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
 {
-	return (readl(tp->regs + off + GRCMBOX_BASE));
+	return readl(tp->regs + off + GRCMBOX_BASE);
 }

 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
@@ -496,16 +519,16 @@ static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
 	writel(val, tp->regs + off + GRCMBOX_BASE);
 }

 #define tw32_mailbox(reg, val)		tp->write32_mbox(tp, reg, val)
 #define tw32_mailbox_f(reg, val)	tw32_mailbox_flush(tp, (reg), (val))
 #define tw32_rx_mbox(reg, val)		tp->write32_rx_mbox(tp, reg, val)
 #define tw32_tx_mbox(reg, val)		tp->write32_tx_mbox(tp, reg, val)
 #define tr32_mailbox(reg)		tp->read32_mbox(tp, reg)

-#define tw32(reg,val)		tp->write32(tp, reg, val)
-#define tw32_f(reg,val)		_tw32_flush(tp,(reg),(val), 0)
-#define tw32_wait_f(reg,val,us)	_tw32_flush(tp,(reg),(val), (us))
+#define tw32(reg, val)		tp->write32(tp, reg, val)
+#define tw32_f(reg, val)	_tw32_flush(tp, (reg), (val), 0)
+#define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
 #define tr32(reg)		tp->read32(tp, reg)

 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
 {
@@ -579,11 +602,11 @@ static int tg3_ape_lock(struct tg3 *tp, int locknum)
 		return 0;

 	switch (locknum) {
 	case TG3_APE_LOCK_GRC:
 	case TG3_APE_LOCK_MEM:
 		break;
 	default:
 		return -EINVAL;
 	}

 	off = 4 * locknum;
@@ -617,11 +640,11 @@ static void tg3_ape_unlock(struct tg3 *tp, int locknum)
 		return;

 	switch (locknum) {
 	case TG3_APE_LOCK_GRC:
 	case TG3_APE_LOCK_MEM:
 		break;
 	default:
 		return;
 	}

 	off = 4 * locknum;
@@ -651,6 +674,7 @@ static void tg3_enable_ints(struct tg3 *tp)
 	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
 	for (i = 0; i < tp->irq_cnt; i++) {
 		struct tg3_napi *tnapi = &tp->napi[i];
+
 		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
 		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
 			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -1098,7 +1122,7 @@ static int tg3_mdio_init(struct tg3 *tp)

 	i = mdiobus_register(tp->mdio_bus);
 	if (i) {
-		netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
+		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
 		mdiobus_free(tp->mdio_bus);
 		return i;
 	}
@@ -1106,7 +1130,7 @@ static int tg3_mdio_init(struct tg3 *tp)
 	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

 	if (!phydev || !phydev->drv) {
-		netdev_warn(tp->dev, "No PHY devices\n");
+		dev_warn(&tp->pdev->dev, "No PHY devices\n");
 		mdiobus_unregister(tp->mdio_bus);
 		mdiobus_free(tp->mdio_bus);
 		return -ENODEV;
@@ -1437,7 +1461,7 @@ static void tg3_adjust_link(struct net_device *dev)
 	    phydev->speed != tp->link_config.active_speed ||
 	    phydev->duplex != tp->link_config.active_duplex ||
 	    oldflowctrl != tp->link_config.active_flowctrl)
 		linkmesg = 1;

 	tp->link_config.active_speed = phydev->speed;
 	tp->link_config.active_duplex = phydev->duplex;
@@ -1464,7 +1488,7 @@ static int tg3_phy_init(struct tg3 *tp)
 	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
 			     phydev->dev_flags, phydev->interface);
 	if (IS_ERR(phydev)) {
-		netdev_err(tp->dev, "Could not attach to PHY\n");
+		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
 		return PTR_ERR(phydev);
 	}

@@ -1855,8 +1879,7 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
 		/* Set Extended packet length bit for jumbo frames */
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
-	}
-	else {
+	} else {
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
 	}

@@ -1974,8 +1997,7 @@ out:
 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
 		tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
-	}
-	else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
+	} else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
 		tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
 		tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
 		if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
@@ -2007,8 +2029,8 @@ out:
 		u32 phy_reg;

 		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
 			tg3_writephy(tp, MII_TG3_EXT_CTRL,
 				     phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
 	}

 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
@@ -3425,7 +3447,7 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3425 ap->rxconfig = rx_cfg_reg; 3447 ap->rxconfig = rx_cfg_reg;
3426 ret = ANEG_OK; 3448 ret = ANEG_OK;
3427 3449
3428 switch(ap->state) { 3450 switch (ap->state) {
3429 case ANEG_STATE_UNKNOWN: 3451 case ANEG_STATE_UNKNOWN:
3430 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN)) 3452 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3431 ap->state = ANEG_STATE_AN_ENABLE; 3453 ap->state = ANEG_STATE_AN_ENABLE;
@@ -3463,11 +3485,10 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3463 /* fallthru */ 3485 /* fallthru */
3464 case ANEG_STATE_RESTART: 3486 case ANEG_STATE_RESTART:
3465 delta = ap->cur_time - ap->link_time; 3487 delta = ap->cur_time - ap->link_time;
3466 if (delta > ANEG_STATE_SETTLE_TIME) { 3488 if (delta > ANEG_STATE_SETTLE_TIME)
3467 ap->state = ANEG_STATE_ABILITY_DETECT_INIT; 3489 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3468 } else { 3490 else
3469 ret = ANEG_TIMER_ENAB; 3491 ret = ANEG_TIMER_ENAB;
3470 }
3471 break; 3492 break;
3472 3493
3473 case ANEG_STATE_DISABLE_LINK_OK: 3494 case ANEG_STATE_DISABLE_LINK_OK:
@@ -3491,9 +3512,8 @@ static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3491 break; 3512 break;
3492 3513
3493 case ANEG_STATE_ABILITY_DETECT: 3514 case ANEG_STATE_ABILITY_DETECT:
3494 if (ap->ability_match != 0 && ap->rxconfig != 0) { 3515 if (ap->ability_match != 0 && ap->rxconfig != 0)
3495 ap->state = ANEG_STATE_ACK_DETECT_INIT; 3516 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3496 }
3497 break; 3517 break;
3498 3518
3499 case ANEG_STATE_ACK_DETECT_INIT: 3519 case ANEG_STATE_ACK_DETECT_INIT:
@@ -4171,9 +4191,9 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
4171 current_duplex = DUPLEX_FULL; 4191 current_duplex = DUPLEX_FULL;
4172 else 4192 else
4173 current_duplex = DUPLEX_HALF; 4193 current_duplex = DUPLEX_HALF;
4174 } 4194 } else {
4175 else
4176 current_link_up = 0; 4195 current_link_up = 0;
4196 }
4177 } 4197 }
4178 } 4198 }
4179 4199
@@ -4211,6 +4231,7 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
4211 tp->serdes_counter--; 4231 tp->serdes_counter--;
4212 return; 4232 return;
4213 } 4233 }
4234
4214 if (!netif_carrier_ok(tp->dev) && 4235 if (!netif_carrier_ok(tp->dev) &&
4215 (tp->link_config.autoneg == AUTONEG_ENABLE)) { 4236 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
4216 u32 bmcr; 4237 u32 bmcr;
@@ -4240,10 +4261,9 @@ static void tg3_serdes_parallel_detect(struct tg3 *tp)
4240 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT; 4261 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
4241 } 4262 }
4242 } 4263 }
4243 } 4264 } else if (netif_carrier_ok(tp->dev) &&
4244 else if (netif_carrier_ok(tp->dev) && 4265 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
4245 (tp->link_config.autoneg == AUTONEG_ENABLE) && 4266 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4246 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
4247 u32 phy2; 4267 u32 phy2;
4248 4268
4249 /* Select expansion interrupt status register */ 4269 /* Select expansion interrupt status register */
@@ -4266,13 +4286,12 @@ static int tg3_setup_phy(struct tg3 *tp, int force_reset)
4266{ 4286{
4267 int err; 4287 int err;
4268 4288
4269 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 4289 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
4270 err = tg3_setup_fiber_phy(tp, force_reset); 4290 err = tg3_setup_fiber_phy(tp, force_reset);
4271 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) { 4291 else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
4272 err = tg3_setup_fiber_mii_phy(tp, force_reset); 4292 err = tg3_setup_fiber_mii_phy(tp, force_reset);
4273 } else { 4293 else
4274 err = tg3_setup_copper_phy(tp, force_reset); 4294 err = tg3_setup_copper_phy(tp, force_reset);
4275 }
4276 4295
4277 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) { 4296 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
4278 u32 val, scale; 4297 u32 val, scale;
@@ -4335,8 +4354,11 @@ static void tg3_tx_recover(struct tg3 *tp)
4335 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) || 4354 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
4336 tp->write32_tx_mbox == tg3_write_indirect_mbox); 4355 tp->write32_tx_mbox == tg3_write_indirect_mbox);
4337 4356
4338 netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n" 4357 netdev_warn(tp->dev,
4339 "Please report the problem to the driver maintainer and include system chipset information.\n"); 4358 "The system may be re-ordering memory-mapped I/O "
4359 "cycles to the network device, attempting to recover. "
4360 "Please report the problem to the driver maintainer "
4361 "and include system chipset information.\n");
4340 4362
4341 spin_lock(&tp->lock); 4363 spin_lock(&tp->lock);
4342 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING; 4364 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
@@ -4378,7 +4400,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
4378 } 4400 }
4379 4401
4380 pci_unmap_single(tp->pdev, 4402 pci_unmap_single(tp->pdev,
4381 pci_unmap_addr(ri, mapping), 4403 dma_unmap_addr(ri, mapping),
4382 skb_headlen(skb), 4404 skb_headlen(skb),
4383 PCI_DMA_TODEVICE); 4405 PCI_DMA_TODEVICE);
4384 4406
@@ -4392,7 +4414,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
4392 tx_bug = 1; 4414 tx_bug = 1;
4393 4415
4394 pci_unmap_page(tp->pdev, 4416 pci_unmap_page(tp->pdev,
4395 pci_unmap_addr(ri, mapping), 4417 dma_unmap_addr(ri, mapping),
4396 skb_shinfo(skb)->frags[i].size, 4418 skb_shinfo(skb)->frags[i].size,
4397 PCI_DMA_TODEVICE); 4419 PCI_DMA_TODEVICE);
4398 sw_idx = NEXT_TX(sw_idx); 4420 sw_idx = NEXT_TX(sw_idx);
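
This hunk begins the move from the PCI-specific pci_unmap_addr() helpers to the generic dma_unmap_addr() family, repeated through the rest of the patch; the stored bus address only occupies space in the ring entry on architectures that actually need it at unmap time. A hedged sketch of the idiom, with the struct and function names here being illustrative rather than tg3's:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>
    #include <linux/skbuff.h>

    /* Illustrative ring entry: DEFINE_DMA_UNMAP_ADDR() declares the
     * 'mapping' slot only on configurations that need the address kept
     * around for unmapping; it compiles to nothing elsewhere.
     */
    struct ring_entry {
    	struct sk_buff *skb;
    	DEFINE_DMA_UNMAP_ADDR(mapping);
    };

    static void stash_mapping(struct ring_entry *e, dma_addr_t addr)
    {
    	dma_unmap_addr_set(e, mapping, addr);	/* was pci_unmap_addr_set() */
    }

    static void unmap_entry(struct pci_dev *pdev, struct ring_entry *e)
    {
    	pci_unmap_single(pdev, dma_unmap_addr(e, mapping),
    			 skb_headlen(e->skb), PCI_DMA_TODEVICE);
    }
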
@@ -4430,7 +4452,7 @@ static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4430 if (!ri->skb) 4452 if (!ri->skb)
4431 return; 4453 return;
4432 4454
4433 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping), 4455 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4434 map_sz, PCI_DMA_FROMDEVICE); 4456 map_sz, PCI_DMA_FROMDEVICE);
4435 dev_kfree_skb_any(ri->skb); 4457 dev_kfree_skb_any(ri->skb);
4436 ri->skb = NULL; 4458 ri->skb = NULL;
@@ -4496,7 +4518,7 @@ static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4496 } 4518 }
4497 4519
4498 map->skb = skb; 4520 map->skb = skb;
4499 pci_unmap_addr_set(map, mapping, mapping); 4521 dma_unmap_addr_set(map, mapping, mapping);
4500 4522
4501 desc->addr_hi = ((u64)mapping >> 32); 4523 desc->addr_hi = ((u64)mapping >> 32);
4502 desc->addr_lo = ((u64)mapping & 0xffffffff); 4524 desc->addr_lo = ((u64)mapping & 0xffffffff);
@@ -4516,8 +4538,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
4516 struct tg3 *tp = tnapi->tp; 4538 struct tg3 *tp = tnapi->tp;
4517 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 4539 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4518 struct ring_info *src_map, *dest_map; 4540 struct ring_info *src_map, *dest_map;
4519 int dest_idx;
4520 struct tg3_rx_prodring_set *spr = &tp->prodring[0]; 4541 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4542 int dest_idx;
4521 4543
4522 switch (opaque_key) { 4544 switch (opaque_key) {
4523 case RXD_OPAQUE_RING_STD: 4545 case RXD_OPAQUE_RING_STD:
@@ -4541,8 +4563,8 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
4541 } 4563 }
4542 4564
4543 dest_map->skb = src_map->skb; 4565 dest_map->skb = src_map->skb;
4544 pci_unmap_addr_set(dest_map, mapping, 4566 dma_unmap_addr_set(dest_map, mapping,
4545 pci_unmap_addr(src_map, mapping)); 4567 dma_unmap_addr(src_map, mapping));
4546 dest_desc->addr_hi = src_desc->addr_hi; 4568 dest_desc->addr_hi = src_desc->addr_hi;
4547 dest_desc->addr_lo = src_desc->addr_lo; 4569 dest_desc->addr_lo = src_desc->addr_lo;
4548 4570
@@ -4605,18 +4627,20 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4605 struct sk_buff *skb; 4627 struct sk_buff *skb;
4606 dma_addr_t dma_addr; 4628 dma_addr_t dma_addr;
4607 u32 opaque_key, desc_idx, *post_ptr; 4629 u32 opaque_key, desc_idx, *post_ptr;
4630 bool hw_vlan __maybe_unused = false;
4631 u16 vtag __maybe_unused = 0;
4608 4632
4609 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4633 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4610 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4634 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4611 if (opaque_key == RXD_OPAQUE_RING_STD) { 4635 if (opaque_key == RXD_OPAQUE_RING_STD) {
4612 ri = &tp->prodring[0].rx_std_buffers[desc_idx]; 4636 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4613 dma_addr = pci_unmap_addr(ri, mapping); 4637 dma_addr = dma_unmap_addr(ri, mapping);
4614 skb = ri->skb; 4638 skb = ri->skb;
4615 post_ptr = &std_prod_idx; 4639 post_ptr = &std_prod_idx;
4616 rx_std_posted++; 4640 rx_std_posted++;
4617 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 4641 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4618 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx]; 4642 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4619 dma_addr = pci_unmap_addr(ri, mapping); 4643 dma_addr = dma_unmap_addr(ri, mapping);
4620 skb = ri->skb; 4644 skb = ri->skb;
4621 post_ptr = &jmb_prod_idx; 4645 post_ptr = &jmb_prod_idx;
4622 } else 4646 } else
@@ -4638,12 +4662,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4638 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4662 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
4639 ETH_FCS_LEN; 4663 ETH_FCS_LEN;
4640 4664
4641 if (len > RX_COPY_THRESHOLD && 4665 if (len > TG3_RX_COPY_THRESH(tp)) {
4642 tp->rx_offset == NET_IP_ALIGN) {
4643 /* rx_offset will likely not equal NET_IP_ALIGN
4644 * if this is a 5701 card running in PCI-X mode
4645 * [see tg3_get_invariants()]
4646 */
4647 int skb_size; 4666 int skb_size;
4648 4667
4649 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key, 4668 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
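
The open-coded test against RX_COPY_THRESHOLD plus the rx_offset special case collapses into a single TG3_RX_COPY_THRESH(tp) macro. The policy itself is unchanged: a frame above the threshold keeps its mapped buffer and a replacement is allocated for the ring, while a small frame is copied into a right-sized skb so the large buffer can be recycled, as the next hunk shows. A self-contained sketch of that decision, assuming a fixed 256-byte cutoff:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative copy-vs-flip receive policy with a fixed threshold;
     * the driver's real macro can vary the cutoff per chip.
     */
    #define RX_COPY_THRESH 256

    static bool rx_should_copy(unsigned int pkt_len)
    {
    	/* Small frame: memcpy into a compact buffer and recycle the
    	 * original DMA buffer. Large frame: hand the mapped buffer up
    	 * and allocate a replacement for the ring.
    	 */
    	return pkt_len <= RX_COPY_THRESH;
    }

    int main(void)
    {
    	unsigned int lens[] = { 60, 256, 257, 1514 };

    	for (unsigned int i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
    		printf("%4u bytes -> %s\n", lens[i],
    		       rx_should_copy(lens[i]) ? "copy" : "flip buffer");
    	return 0;
    }
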
@@ -4668,12 +4687,12 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4668 tg3_recycle_rx(tnapi, tpr, opaque_key, 4687 tg3_recycle_rx(tnapi, tpr, opaque_key,
4669 desc_idx, *post_ptr); 4688 desc_idx, *post_ptr);
4670 4689
4671 copy_skb = netdev_alloc_skb(tp->dev, 4690 copy_skb = netdev_alloc_skb(tp->dev, len + VLAN_HLEN +
4672 len + TG3_RAW_IP_ALIGN); 4691 TG3_RAW_IP_ALIGN);
4673 if (copy_skb == NULL) 4692 if (copy_skb == NULL)
4674 goto drop_it_no_recycle; 4693 goto drop_it_no_recycle;
4675 4694
4676 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN); 4695 skb_reserve(copy_skb, TG3_RAW_IP_ALIGN + VLAN_HLEN);
4677 skb_put(copy_skb, len); 4696 skb_put(copy_skb, len);
4678 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); 4697 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4679 skb_copy_from_linear_data(skb, copy_skb->data, len); 4698 skb_copy_from_linear_data(skb, copy_skb->data, len);
@@ -4699,12 +4718,29 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4699 goto next_pkt; 4718 goto next_pkt;
4700 } 4719 }
4701 4720
4721 if (desc->type_flags & RXD_FLAG_VLAN &&
4722 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG)) {
4723 vtag = desc->err_vlan & RXD_VLAN_MASK;
4702#if TG3_VLAN_TAG_USED 4724#if TG3_VLAN_TAG_USED
4703 if (tp->vlgrp != NULL && 4725 if (tp->vlgrp)
4704 desc->type_flags & RXD_FLAG_VLAN) { 4726 hw_vlan = true;
4705 vlan_gro_receive(&tnapi->napi, tp->vlgrp, 4727 else
4706 desc->err_vlan & RXD_VLAN_MASK, skb); 4728#endif
4707 } else 4729 {
4730 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
4731 __skb_push(skb, VLAN_HLEN);
4732
4733 memmove(ve, skb->data + VLAN_HLEN,
4734 ETH_ALEN * 2);
4735 ve->h_vlan_proto = htons(ETH_P_8021Q);
4736 ve->h_vlan_TCI = htons(vtag);
4737 }
4738 }
4739
4740#if TG3_VLAN_TAG_USED
4741 if (hw_vlan)
4742 vlan_gro_receive(&tnapi->napi, tp->vlgrp, vtag, skb);
4743 else
4708#endif 4744#endif
4709 napi_gro_receive(&tnapi->napi, skb); 4745 napi_gro_receive(&tnapi->napi, skb);
4710 4746
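
The reworked receive tail extracts the VLAN tag once and then takes one of two paths: with a registered vlan group it hands the tag to vlan_gro_receive(), otherwise it rebuilds an 802.1Q header in front of the payload, which is why the copy path in the previous hunk now reserves an extra VLAN_HLEN of headroom. A self-contained sketch of the in-place re-insertion on a plain byte buffer, mirroring the __skb_push()/memmove() sequence; the function name is illustrative:

    #include <stdint.h>
    #include <string.h>

    #define ETH_ALEN    6
    #define VLAN_HLEN   4
    #define ETH_P_8021Q 0x8100

    /* Rebuild an 802.1Q header in front of the payload: 'frame' must
     * have VLAN_HLEN bytes of headroom before it. Illustrative sketch,
     * not the driver code.
     */
    static void vlan_reinsert(uint8_t *frame, uint16_t vtag)
    {
    	uint8_t *newstart = frame - VLAN_HLEN;

    	/* Move the dest+src MAC addresses down over the headroom */
    	memmove(newstart, frame, ETH_ALEN * 2);

    	/* Write TPID and TCI in network byte order after the MACs */
    	newstart[12] = ETH_P_8021Q >> 8;
    	newstart[13] = ETH_P_8021Q & 0xff;
    	newstart[14] = vtag >> 8;
    	newstart[15] = vtag & 0xff;
    	/* Original EtherType/payload now follows at newstart + 16 */
    }
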
@@ -4978,7 +5014,7 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
4978 if (unlikely(work_done >= budget)) 5014 if (unlikely(work_done >= budget))
4979 break; 5015 break;
4980 5016
4981 /* tp->last_tag is used in tg3_restart_ints() below 5017 /* tp->last_tag is used in tg3_int_reenable() below
4982 * to tell the hw how much work has been processed, 5018 * to tell the hw how much work has been processed,
4983 * so we must read it before checking for more work. 5019 * so we must read it before checking for more work.
4984 */ 5020 */
@@ -4987,8 +5023,8 @@ static int tg3_poll_msix(struct napi_struct *napi, int budget)
4987 rmb(); 5023 rmb();
4988 5024
4989 /* check for RX/TX work to do */ 5025 /* check for RX/TX work to do */
4990 if (sblk->idx[0].tx_consumer == tnapi->tx_cons && 5026 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4991 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) { 5027 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
4992 napi_complete(napi); 5028 napi_complete(napi);
4993 /* Reenable interrupts. */ 5029 /* Reenable interrupts. */
4994 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24); 5030 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
@@ -5260,7 +5296,8 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
5260 5296
5261 err = tg3_init_hw(tp, reset_phy); 5297 err = tg3_init_hw(tp, reset_phy);
5262 if (err) { 5298 if (err) {
5263 netdev_err(tp->dev, "Failed to re-initialize device, aborting\n"); 5299 netdev_err(tp->dev,
5300 "Failed to re-initialize device, aborting\n");
5264 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 5301 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5265 tg3_full_unlock(tp); 5302 tg3_full_unlock(tp);
5266 del_timer_sync(&tp->timer); 5303 del_timer_sync(&tp->timer);
@@ -5437,12 +5474,12 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5437 len = skb_shinfo(skb)->frags[i-1].size; 5474 len = skb_shinfo(skb)->frags[i-1].size;
5438 5475
5439 pci_unmap_single(tp->pdev, 5476 pci_unmap_single(tp->pdev,
5440 pci_unmap_addr(&tnapi->tx_buffers[entry], 5477 dma_unmap_addr(&tnapi->tx_buffers[entry],
5441 mapping), 5478 mapping),
5442 len, PCI_DMA_TODEVICE); 5479 len, PCI_DMA_TODEVICE);
5443 if (i == 0) { 5480 if (i == 0) {
5444 tnapi->tx_buffers[entry].skb = new_skb; 5481 tnapi->tx_buffers[entry].skb = new_skb;
5445 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 5482 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5446 new_addr); 5483 new_addr);
5447 } else { 5484 } else {
5448 tnapi->tx_buffers[entry].skb = NULL; 5485 tnapi->tx_buffers[entry].skb = NULL;
@@ -5492,7 +5529,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5492 struct netdev_queue *txq; 5529 struct netdev_queue *txq;
5493 unsigned int i, last; 5530 unsigned int i, last;
5494 5531
5495
5496 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 5532 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5497 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 5533 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5498 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 5534 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5508,7 +5544,8 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5508 netif_tx_stop_queue(txq); 5544 netif_tx_stop_queue(txq);
5509 5545
5510 /* This is a hard error, log it. */ 5546 /* This is a hard error, log it. */
5511 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 5547 netdev_err(dev,
5548 "BUG! Tx Ring full when queue awake!\n");
5512 } 5549 }
5513 return NETDEV_TX_BUSY; 5550 return NETDEV_TX_BUSY;
5514 } 5551 }
@@ -5552,9 +5589,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5552 5589
5553 tcp_hdr(skb)->check = 0; 5590 tcp_hdr(skb)->check = 0;
5554 5591
5555 } 5592 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5556 else if (skb->ip_summed == CHECKSUM_PARTIAL)
5557 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5593 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5594 }
5595
5558#if TG3_VLAN_TAG_USED 5596#if TG3_VLAN_TAG_USED
5559 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb)) 5597 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5560 base_flags |= (TXD_FLAG_VLAN | 5598 base_flags |= (TXD_FLAG_VLAN |
@@ -5571,7 +5609,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5571 } 5609 }
5572 5610
5573 tnapi->tx_buffers[entry].skb = skb; 5611 tnapi->tx_buffers[entry].skb = skb;
5574 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 5612 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5575 5613
5576 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5614 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5577 !mss && skb->len > ETH_DATA_LEN) 5615 !mss && skb->len > ETH_DATA_LEN)
@@ -5597,7 +5635,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5597 goto dma_error; 5635 goto dma_error;
5598 5636
5599 tnapi->tx_buffers[entry].skb = NULL; 5637 tnapi->tx_buffers[entry].skb = NULL;
5600 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 5638 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5601 mapping); 5639 mapping);
5602 5640
5603 tg3_set_txd(tnapi, entry, mapping, len, 5641 tg3_set_txd(tnapi, entry, mapping, len,
@@ -5627,7 +5665,7 @@ dma_error:
5627 entry = tnapi->tx_prod; 5665 entry = tnapi->tx_prod;
5628 tnapi->tx_buffers[entry].skb = NULL; 5666 tnapi->tx_buffers[entry].skb = NULL;
5629 pci_unmap_single(tp->pdev, 5667 pci_unmap_single(tp->pdev,
5630 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), 5668 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5631 skb_headlen(skb), 5669 skb_headlen(skb),
5632 PCI_DMA_TODEVICE); 5670 PCI_DMA_TODEVICE);
5633 for (i = 0; i <= last; i++) { 5671 for (i = 0; i <= last; i++) {
@@ -5635,7 +5673,7 @@ dma_error:
5635 entry = NEXT_TX(entry); 5673 entry = NEXT_TX(entry);
5636 5674
5637 pci_unmap_page(tp->pdev, 5675 pci_unmap_page(tp->pdev,
5638 pci_unmap_addr(&tnapi->tx_buffers[entry], 5676 dma_unmap_addr(&tnapi->tx_buffers[entry],
5639 mapping), 5677 mapping),
5640 frag->size, PCI_DMA_TODEVICE); 5678 frag->size, PCI_DMA_TODEVICE);
5641 } 5679 }
@@ -5695,7 +5733,6 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5695 struct netdev_queue *txq; 5733 struct netdev_queue *txq;
5696 unsigned int i, last; 5734 unsigned int i, last;
5697 5735
5698
5699 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb)); 5736 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5700 tnapi = &tp->napi[skb_get_queue_mapping(skb)]; 5737 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5701 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS) 5738 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
@@ -5711,7 +5748,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5711 netif_tx_stop_queue(txq); 5748 netif_tx_stop_queue(txq);
5712 5749
5713 /* This is a hard error, log it. */ 5750 /* This is a hard error, log it. */
5714 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 5751 netdev_err(dev,
5752 "BUG! Tx Ring full when queue awake!\n");
5715 } 5753 }
5716 return NETDEV_TX_BUSY; 5754 return NETDEV_TX_BUSY;
5717 } 5755 }
@@ -5737,7 +5775,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5737 hdr_len = ip_tcp_len + tcp_opt_len; 5775 hdr_len = ip_tcp_len + tcp_opt_len;
5738 if (unlikely((ETH_HLEN + hdr_len) > 80) && 5776 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5739 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG)) 5777 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5740 return (tg3_tso_bug(tp, skb)); 5778 return tg3_tso_bug(tp, skb);
5741 5779
5742 base_flags |= (TXD_FLAG_CPU_PRE_DMA | 5780 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5743 TXD_FLAG_CPU_POST_DMA); 5781 TXD_FLAG_CPU_POST_DMA);
@@ -5797,7 +5835,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5797 } 5835 }
5798 5836
5799 tnapi->tx_buffers[entry].skb = skb; 5837 tnapi->tx_buffers[entry].skb = skb;
5800 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 5838 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5801 5839
5802 would_hit_hwbug = 0; 5840 would_hit_hwbug = 0;
5803 5841
@@ -5833,7 +5871,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5833 len, PCI_DMA_TODEVICE); 5871 len, PCI_DMA_TODEVICE);
5834 5872
5835 tnapi->tx_buffers[entry].skb = NULL; 5873 tnapi->tx_buffers[entry].skb = NULL;
5836 pci_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, 5874 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
5837 mapping); 5875 mapping);
5838 if (pci_dma_mapping_error(tp->pdev, mapping)) 5876 if (pci_dma_mapping_error(tp->pdev, mapping))
5839 goto dma_error; 5877 goto dma_error;
@@ -5898,7 +5936,7 @@ dma_error:
5898 entry = tnapi->tx_prod; 5936 entry = tnapi->tx_prod;
5899 tnapi->tx_buffers[entry].skb = NULL; 5937 tnapi->tx_buffers[entry].skb = NULL;
5900 pci_unmap_single(tp->pdev, 5938 pci_unmap_single(tp->pdev,
5901 pci_unmap_addr(&tnapi->tx_buffers[entry], mapping), 5939 dma_unmap_addr(&tnapi->tx_buffers[entry], mapping),
5902 skb_headlen(skb), 5940 skb_headlen(skb),
5903 PCI_DMA_TODEVICE); 5941 PCI_DMA_TODEVICE);
5904 for (i = 0; i <= last; i++) { 5942 for (i = 0; i <= last; i++) {
@@ -5906,7 +5944,7 @@ dma_error:
5906 entry = NEXT_TX(entry); 5944 entry = NEXT_TX(entry);
5907 5945
5908 pci_unmap_page(tp->pdev, 5946 pci_unmap_page(tp->pdev,
5909 pci_unmap_addr(&tnapi->tx_buffers[entry], 5947 dma_unmap_addr(&tnapi->tx_buffers[entry],
5910 mapping), 5948 mapping),
5911 frag->size, PCI_DMA_TODEVICE); 5949 frag->size, PCI_DMA_TODEVICE);
5912 } 5950 }
@@ -5924,9 +5962,9 @@ static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5924 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) { 5962 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5925 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE; 5963 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5926 ethtool_op_set_tso(dev, 0); 5964 ethtool_op_set_tso(dev, 0);
5927 } 5965 } else {
5928 else
5929 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE; 5966 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5967 }
5930 } else { 5968 } else {
5931 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) 5969 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5932 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 5970 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
@@ -6007,7 +6045,7 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
6007 } 6045 }
6008} 6046}
6009 6047
6010/* Initialize tx/rx rings for packet processing. 6048/* Initialize rx rings for packet processing.
6011 * 6049 *
6012 * The chip has been shut down and the driver detached from 6050 * The chip has been shut down and the driver detached from
6013 * the networking, so no interrupts or new tx packets will 6051 * the networking, so no interrupts or new tx packets will
@@ -6058,8 +6096,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6058 /* Now allocate fresh SKBs for each rx ring. */ 6096 /* Now allocate fresh SKBs for each rx ring. */
6059 for (i = 0; i < tp->rx_pending; i++) { 6097 for (i = 0; i < tp->rx_pending; i++) {
6060 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) { 6098 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
6061 netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n", 6099 netdev_warn(tp->dev,
6062 i, tp->rx_pending); 6100 "Using a smaller RX standard ring. Only "
6101 "%d out of %d buffers were allocated "
6102 "successfully\n", i, tp->rx_pending);
6063 if (i == 0) 6103 if (i == 0)
6064 goto initfail; 6104 goto initfail;
6065 tp->rx_pending = i; 6105 tp->rx_pending = i;
@@ -6088,8 +6128,10 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
6088 6128
6089 for (i = 0; i < tp->rx_jumbo_pending; i++) { 6129 for (i = 0; i < tp->rx_jumbo_pending; i++) {
6090 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) { 6130 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
6091 netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n", 6131 netdev_warn(tp->dev,
6092 i, tp->rx_jumbo_pending); 6132 "Using a smaller RX jumbo ring. Only %d "
6133 "out of %d buffers were allocated "
6134 "successfully\n", i, tp->rx_jumbo_pending);
6093 if (i == 0) 6135 if (i == 0)
6094 goto initfail; 6136 goto initfail;
6095 tp->rx_jumbo_pending = i; 6137 tp->rx_jumbo_pending = i;
@@ -6187,7 +6229,7 @@ static void tg3_free_rings(struct tg3 *tp)
6187 } 6229 }
6188 6230
6189 pci_unmap_single(tp->pdev, 6231 pci_unmap_single(tp->pdev,
6190 pci_unmap_addr(txp, mapping), 6232 dma_unmap_addr(txp, mapping),
6191 skb_headlen(skb), 6233 skb_headlen(skb),
6192 PCI_DMA_TODEVICE); 6234 PCI_DMA_TODEVICE);
6193 txp->skb = NULL; 6235 txp->skb = NULL;
@@ -6197,7 +6239,7 @@ static void tg3_free_rings(struct tg3 *tp)
6197 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) { 6239 for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
6198 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)]; 6240 txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
6199 pci_unmap_page(tp->pdev, 6241 pci_unmap_page(tp->pdev,
6200 pci_unmap_addr(txp, mapping), 6242 dma_unmap_addr(txp, mapping),
6201 skb_shinfo(skb)->frags[k].size, 6243 skb_shinfo(skb)->frags[k].size,
6202 PCI_DMA_TODEVICE); 6244 PCI_DMA_TODEVICE);
6203 i++; 6245 i++;
@@ -6433,8 +6475,9 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
6433 } 6475 }
6434 6476
6435 if (i == MAX_WAIT_CNT && !silent) { 6477 if (i == MAX_WAIT_CNT && !silent) {
6436 pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n", 6478 dev_err(&tp->pdev->dev,
6437 ofs, enable_bit); 6479 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
6480 ofs, enable_bit);
6438 return -ENODEV; 6481 return -ENODEV;
6439 } 6482 }
6440 6483
@@ -6480,8 +6523,9 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
6480 break; 6523 break;
6481 } 6524 }
6482 if (i >= MAX_WAIT_CNT) { 6525 if (i >= MAX_WAIT_CNT) {
6483 netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n", 6526 dev_err(&tp->pdev->dev,
6484 __func__, tr32(MAC_TX_MODE)); 6527 "%s timed out, TX_MODE_ENABLE will not clear "
6528 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
6485 err |= -ENODEV; 6529 err |= -ENODEV;
6486 } 6530 }
6487 6531
@@ -6551,35 +6595,35 @@ static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
6551 return; 6595 return;
6552 6596
6553 switch (kind) { 6597 switch (kind) {
6554 case RESET_KIND_INIT: 6598 case RESET_KIND_INIT:
6555 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 6599 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
6556 APE_HOST_SEG_SIG_MAGIC); 6600 APE_HOST_SEG_SIG_MAGIC);
6557 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN, 6601 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
6558 APE_HOST_SEG_LEN_MAGIC); 6602 APE_HOST_SEG_LEN_MAGIC);
6559 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT); 6603 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
6560 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata); 6604 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
6561 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID, 6605 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
6562 APE_HOST_DRIVER_ID_MAGIC); 6606 APE_HOST_DRIVER_ID_MAGIC);
6563 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR, 6607 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
6564 APE_HOST_BEHAV_NO_PHYLOCK); 6608 APE_HOST_BEHAV_NO_PHYLOCK);
6565 6609
6566 event = APE_EVENT_STATUS_STATE_START; 6610 event = APE_EVENT_STATUS_STATE_START;
6567 break; 6611 break;
6568 case RESET_KIND_SHUTDOWN: 6612 case RESET_KIND_SHUTDOWN:
6569 /* With the interface we are currently using, 6613 /* With the interface we are currently using,
6570 * APE does not track driver state. Wiping 6614 * APE does not track driver state. Wiping
6571 * out the HOST SEGMENT SIGNATURE forces 6615 * out the HOST SEGMENT SIGNATURE forces
6572 * the APE to assume OS absent status. 6616 * the APE to assume OS absent status.
6573 */ 6617 */
6574 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0); 6618 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
6575 6619
6576 event = APE_EVENT_STATUS_STATE_UNLOAD; 6620 event = APE_EVENT_STATUS_STATE_UNLOAD;
6577 break; 6621 break;
6578 case RESET_KIND_SUSPEND: 6622 case RESET_KIND_SUSPEND:
6579 event = APE_EVENT_STATUS_STATE_SUSPEND; 6623 event = APE_EVENT_STATUS_STATE_SUSPEND;
6580 break; 6624 break;
6581 default: 6625 default:
6582 return; 6626 return;
6583 } 6627 }
6584 6628
6585 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE; 6629 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
@@ -7156,7 +7200,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
7156 7200
7157 if (cpu_base == TX_CPU_BASE && 7201 if (cpu_base == TX_CPU_BASE &&
7158 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) { 7202 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7159 netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n", 7203 netdev_err(tp->dev,
7204 "%s: Trying to load TX cpu firmware which is 5705\n",
7160 __func__); 7205 __func__);
7161 return -EINVAL; 7206 return -EINVAL;
7162 } 7207 }
@@ -7236,7 +7281,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7236 udelay(1000); 7281 udelay(1000);
7237 } 7282 }
7238 if (i >= 5) { 7283 if (i >= 5) {
7239 netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n", 7284 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7285 "should be %08x\n", __func__,
7240 tr32(RX_CPU_BASE + CPU_PC), info.fw_base); 7286 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7241 return -ENODEV; 7287 return -ENODEV;
7242 } 7288 }
@@ -7300,7 +7346,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
7300 udelay(1000); 7346 udelay(1000);
7301 } 7347 }
7302 if (i >= 5) { 7348 if (i >= 5) {
7303 netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n", 7349 netdev_err(tp->dev,
7350 "%s fails to set CPU PC, is %08x should be %08x\n",
7304 __func__, tr32(cpu_base + CPU_PC), info.fw_base); 7351 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
7305 return -ENODEV; 7352 return -ENODEV;
7306 } 7353 }
@@ -7568,9 +7615,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7568 7615
7569 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT); 7616 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7570 7617
7571 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) { 7618 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7572 tg3_abort_hw(tp, 1); 7619 tg3_abort_hw(tp, 1);
7573 }
7574 7620
7575 if (reset_phy) 7621 if (reset_phy)
7576 tg3_phy_reset(tp); 7622 tg3_phy_reset(tp);
@@ -7631,6 +7677,25 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7631 tw32(GRC_MODE, grc_mode); 7677 tw32(GRC_MODE, grc_mode);
7632 } 7678 }
7633 7679
7680 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7681 u32 grc_mode = tr32(GRC_MODE);
7682
7683 /* Access the lower 1K of PL PCIE block registers. */
7684 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7685 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7686
7687 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
7688 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7689 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7690
7691 tw32(GRC_MODE, grc_mode);
7692
7693 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7694 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7695 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7696 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7697 }
7698
7634 /* This works around an issue with Athlon chipsets on 7699 /* This works around an issue with Athlon chipsets on
7635 * B3 tigon3 silicon. This bit has no effect on any 7700 * B3 tigon3 silicon. This bit has no effect on any
7636 * other revision. But do not set this on PCI Express 7701 * other revision. But do not set this on PCI Express
@@ -7679,6 +7744,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7679 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) { 7744 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7680 val = tr32(TG3PCI_DMA_RW_CTRL) & 7745 val = tr32(TG3PCI_DMA_RW_CTRL) &
7681 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT; 7746 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7747 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
7748 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
7682 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl); 7749 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7683 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 7750 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7684 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) { 7751 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
@@ -7723,8 +7790,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7723 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96); 7790 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7724 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE); 7791 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7725 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE); 7792 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7726 } 7793 } else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7727 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7728 int fw_len; 7794 int fw_len;
7729 7795
7730 fw_len = tp->fw_len; 7796 fw_len = tp->fw_len;
@@ -7839,9 +7905,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7839 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 7905 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
7840 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) 7906 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
7841 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) | 7907 val = (RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT) |
7842 (RX_STD_MAX_SIZE << 2); 7908 (TG3_RX_STD_DMA_SZ << 2);
7843 else 7909 else
7844 val = RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT; 7910 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
7845 } else 7911 } else
7846 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT; 7912 val = RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT;
7847 7913
@@ -8476,8 +8542,8 @@ static void tg3_timer(unsigned long __opaque)
8476 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, 8542 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
8477 FWCMD_NICDRV_ALIVE3); 8543 FWCMD_NICDRV_ALIVE3);
8478 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4); 8544 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
8479 /* 5 seconds timeout */ 8545 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
8480 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5); 8546 TG3_FW_UPDATE_TIMEOUT_SEC);
8481 8547
8482 tg3_generate_fw_event(tp); 8548 tg3_generate_fw_event(tp);
8483 } 8549 }
@@ -8625,8 +8691,9 @@ static int tg3_test_msi(struct tg3 *tp)
8625 return err; 8691 return err;
8626 8692
8627 /* MSI test failed, go back to INTx mode */ 8693 /* MSI test failed, go back to INTx mode */
8628 netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n" 8694 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
8629 "Please report this failure to the PCI maintainer and include system chipset information\n"); 8695 "to INTx mode. Please report this failure to the PCI "
8696 "maintainer and include system chipset information\n");
8630 8697
8631 free_irq(tp->napi[0].irq_vec, &tp->napi[0]); 8698 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
8632 8699
@@ -8738,7 +8805,8 @@ static void tg3_ints_init(struct tg3 *tp)
8738 /* All MSI supporting chips should support tagged 8805 /* All MSI supporting chips should support tagged
8739 * status. Assert that this is the case. 8806 * status. Assert that this is the case.
8740 */ 8807 */
8741 netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n"); 8808 netdev_warn(tp->dev,
8809 "MSI without TAGGED_STATUS? Not using MSI\n");
8742 goto defcfg; 8810 goto defcfg;
8743 } 8811 }
8744 8812
@@ -8913,236 +8981,6 @@ err_out1:
8913 return err; 8981 return err;
8914} 8982}
8915 8983
8916#if 0
8917/*static*/ void tg3_dump_state(struct tg3 *tp)
8918{
8919 u32 val32, val32_2, val32_3, val32_4, val32_5;
8920 u16 val16;
8921 int i;
8922 struct tg3_hw_status *sblk = tp->napi[0]->hw_status;
8923
8924 pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8925 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8926 printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8927 val16, val32);
8928
8929 /* MAC block */
8930 printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8931 tr32(MAC_MODE), tr32(MAC_STATUS));
8932 printk(" MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8933 tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8934 printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8935 tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8936 printk(" MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8937 tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8938
8939 /* Send data initiator control block */
8940 printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8941 tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8942 printk(" SNDDATAI_STATSCTRL[%08x]\n",
8943 tr32(SNDDATAI_STATSCTRL));
8944
8945 /* Send data completion control block */
8946 printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8947
8948 /* Send BD ring selector block */
8949 printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8950 tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8951
8952 /* Send BD initiator control block */
8953 printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8954 tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8955
8956 /* Send BD completion control block */
8957 printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8958
8959 /* Receive list placement control block */
8960 printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8961 tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8962 printk(" RCVLPC_STATSCTRL[%08x]\n",
8963 tr32(RCVLPC_STATSCTRL));
8964
8965 /* Receive data and receive BD initiator control block */
8966 printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8967 tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8968
8969 /* Receive data completion control block */
8970 printk("DEBUG: RCVDCC_MODE[%08x]\n",
8971 tr32(RCVDCC_MODE));
8972
8973 /* Receive BD initiator control block */
8974 printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8975 tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8976
8977 /* Receive BD completion control block */
8978 printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8979 tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8980
8981 /* Receive list selector control block */
8982 printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8983 tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8984
8985 /* Mbuf cluster free block */
8986 printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8987 tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8988
8989 /* Host coalescing control block */
8990 printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8991 tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8992 printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8993 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8994 tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8995 printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8996 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8997 tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8998 printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8999 tr32(HOSTCC_STATS_BLK_NIC_ADDR));
9000 printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
9001 tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
9002
9003 /* Memory arbiter control block */
9004 printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
9005 tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
9006
9007 /* Buffer manager control block */
9008 printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
9009 tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
9010 printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
9011 tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
9012 printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
9013 "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
9014 tr32(BUFMGR_DMA_DESC_POOL_ADDR),
9015 tr32(BUFMGR_DMA_DESC_POOL_SIZE));
9016
9017 /* Read DMA control block */
9018 printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
9019 tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
9020
9021 /* Write DMA control block */
9022 printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
9023 tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
9024
9025 /* DMA completion block */
9026 printk("DEBUG: DMAC_MODE[%08x]\n",
9027 tr32(DMAC_MODE));
9028
9029 /* GRC block */
9030 printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
9031 tr32(GRC_MODE), tr32(GRC_MISC_CFG));
9032 printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
9033 tr32(GRC_LOCAL_CTRL));
9034
9035 /* TG3_BDINFOs */
9036 printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
9037 tr32(RCVDBDI_JUMBO_BD + 0x0),
9038 tr32(RCVDBDI_JUMBO_BD + 0x4),
9039 tr32(RCVDBDI_JUMBO_BD + 0x8),
9040 tr32(RCVDBDI_JUMBO_BD + 0xc));
9041 printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
9042 tr32(RCVDBDI_STD_BD + 0x0),
9043 tr32(RCVDBDI_STD_BD + 0x4),
9044 tr32(RCVDBDI_STD_BD + 0x8),
9045 tr32(RCVDBDI_STD_BD + 0xc));
9046 printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
9047 tr32(RCVDBDI_MINI_BD + 0x0),
9048 tr32(RCVDBDI_MINI_BD + 0x4),
9049 tr32(RCVDBDI_MINI_BD + 0x8),
9050 tr32(RCVDBDI_MINI_BD + 0xc));
9051
9052 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
9053 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
9054 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
9055 tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
9056 printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
9057 val32, val32_2, val32_3, val32_4);
9058
9059 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
9060 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
9061 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
9062 tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
9063 printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
9064 val32, val32_2, val32_3, val32_4);
9065
9066 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
9067 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
9068 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
9069 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
9070 tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
9071 printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
9072 val32, val32_2, val32_3, val32_4, val32_5);
9073
9074 /* SW status block */
9075 printk(KERN_DEBUG
9076 "Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
9077 sblk->status,
9078 sblk->status_tag,
9079 sblk->rx_jumbo_consumer,
9080 sblk->rx_consumer,
9081 sblk->rx_mini_consumer,
9082 sblk->idx[0].rx_producer,
9083 sblk->idx[0].tx_consumer);
9084
9085 /* SW statistics block */
9086 printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
9087 ((u32 *)tp->hw_stats)[0],
9088 ((u32 *)tp->hw_stats)[1],
9089 ((u32 *)tp->hw_stats)[2],
9090 ((u32 *)tp->hw_stats)[3]);
9091
9092 /* Mailboxes */
9093 printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
9094 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
9095 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
9096 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
9097 tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
9098
9099 /* NIC side send descriptors. */
9100 for (i = 0; i < 6; i++) {
9101 unsigned long txd;
9102
9103 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
9104 + (i * sizeof(struct tg3_tx_buffer_desc));
9105 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
9106 i,
9107 readl(txd + 0x0), readl(txd + 0x4),
9108 readl(txd + 0x8), readl(txd + 0xc));
9109 }
9110
9111 /* NIC side RX descriptors. */
9112 for (i = 0; i < 6; i++) {
9113 unsigned long rxd;
9114
9115 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
9116 + (i * sizeof(struct tg3_rx_buffer_desc));
9117 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
9118 i,
9119 readl(rxd + 0x0), readl(rxd + 0x4),
9120 readl(rxd + 0x8), readl(rxd + 0xc));
9121 rxd += (4 * sizeof(u32));
9122 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
9123 i,
9124 readl(rxd + 0x0), readl(rxd + 0x4),
9125 readl(rxd + 0x8), readl(rxd + 0xc));
9126 }
9127
9128 for (i = 0; i < 6; i++) {
9129 unsigned long rxd;
9130
9131 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
9132 + (i * sizeof(struct tg3_rx_buffer_desc));
9133 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
9134 i,
9135 readl(rxd + 0x0), readl(rxd + 0x4),
9136 readl(rxd + 0x8), readl(rxd + 0xc));
9137 rxd += (4 * sizeof(u32));
9138 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
9139 i,
9140 readl(rxd + 0x0), readl(rxd + 0x4),
9141 readl(rxd + 0x8), readl(rxd + 0xc));
9142 }
9143}
9144#endif
9145
9146static struct net_device_stats *tg3_get_stats(struct net_device *); 8984static struct net_device_stats *tg3_get_stats(struct net_device *);
9147static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *); 8985static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9148 8986
@@ -9161,9 +8999,6 @@ static int tg3_close(struct net_device *dev)
9161 tg3_phy_stop(tp); 8999 tg3_phy_stop(tp);
9162 9000
9163 tg3_full_lock(tp, 1); 9001 tg3_full_lock(tp, 1);
9164#if 0
9165 tg3_dump_state(tp);
9166#endif
9167 9002
9168 tg3_disable_ints(tp); 9003 tg3_disable_ints(tp);
9169 9004
@@ -9405,9 +9240,8 @@ static inline u32 calc_crc(unsigned char *buf, int len)
9405 9240
9406 reg >>= 1; 9241 reg >>= 1;
9407 9242
9408 if (tmp) { 9243 if (tmp)
9409 reg ^= 0xedb88320; 9244 reg ^= 0xedb88320;
9410 }
9411 } 9245 }
9412 } 9246 }
9413 9247
@@ -9451,20 +9285,20 @@ static void __tg3_set_rx_mode(struct net_device *dev)
9451 rx_mode |= RX_MODE_PROMISC; 9285 rx_mode |= RX_MODE_PROMISC;
9452 } else if (dev->flags & IFF_ALLMULTI) { 9286 } else if (dev->flags & IFF_ALLMULTI) {
9453 /* Accept all multicast. */ 9287 /* Accept all multicast. */
9454 tg3_set_multi (tp, 1); 9288 tg3_set_multi(tp, 1);
9455 } else if (netdev_mc_empty(dev)) { 9289 } else if (netdev_mc_empty(dev)) {
9456 /* Reject all multicast. */ 9290 /* Reject all multicast. */
9457 tg3_set_multi (tp, 0); 9291 tg3_set_multi(tp, 0);
9458 } else { 9292 } else {
9459 /* Accept one or more multicast(s). */ 9293 /* Accept one or more multicast(s). */
9460 struct dev_mc_list *mclist; 9294 struct netdev_hw_addr *ha;
9461 u32 mc_filter[4] = { 0, }; 9295 u32 mc_filter[4] = { 0, };
9462 u32 regidx; 9296 u32 regidx;
9463 u32 bit; 9297 u32 bit;
9464 u32 crc; 9298 u32 crc;
9465 9299
9466 netdev_for_each_mc_addr(mclist, dev) { 9300 netdev_for_each_mc_addr(ha, dev) {
9467 crc = calc_crc (mclist->dmi_addr, ETH_ALEN); 9301 crc = calc_crc(ha->addr, ETH_ALEN);
9468 bit = ~crc & 0x7f; 9302 bit = ~crc & 0x7f;
9469 regidx = (bit & 0x60) >> 5; 9303 regidx = (bit & 0x60) >> 5;
9470 bit &= 0x1f; 9304 bit &= 0x1f;
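
calc_crc() above is the bit-reflected Ethernet CRC-32 (polynomial 0xedb88320, one byte at a time, LSB first). The filter then complements the low seven bits of the CRC, uses bits 6:5 to select one of the four 32-bit MC_HASH registers and bits 4:0 as the bit position; the hunk also converts the walker from the old dev_mc_list to netdev_for_each_mc_addr() over struct netdev_hw_addr. A runnable sketch of the whole hash computation:

    #include <stdint.h>
    #include <stdio.h>

    /* Bit-reflected CRC-32 (poly 0xedb88320), one byte at a time, LSB
     * first -- the same procedure as the driver's calc_crc().
     */
    static uint32_t crc32_le(const uint8_t *buf, int len)
    {
    	uint32_t reg = 0xffffffff;

    	for (int i = 0; i < len; i++) {
    		reg ^= buf[i];
    		for (int b = 0; b < 8; b++) {
    			uint32_t lsb = reg & 1;

    			reg >>= 1;
    			if (lsb)
    				reg ^= 0xedb88320;
    		}
    	}
    	return ~reg;
    }

    int main(void)
    {
    	uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
    	uint32_t mc_filter[4] = { 0, };
    	uint32_t crc = crc32_le(mac, 6);
    	uint32_t bit = ~crc & 0x7f;		/* low 7 bits, complemented */
    	uint32_t regidx = (bit & 0x60) >> 5;	/* which MC_HASH register */

    	bit &= 0x1f;				/* bit within that register */
    	mc_filter[regidx] |= 1u << bit;
    	printf("crc=%08x reg=%u bit=%u\n", crc, regidx, bit);
    	return 0;
    }
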
@@ -9617,7 +9451,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
9617 memcpy(data, ((char*)&val) + b_offset, b_count); 9451 memcpy(data, ((char*)&val) + b_offset, b_count);
9618 len -= b_count; 9452 len -= b_count;
9619 offset += b_count; 9453 offset += b_count;
9620 eeprom->len += b_count; 9454 eeprom->len += b_count;
9621 } 9455 }
9622 9456
9623 /* read bytes upto the last 4 byte boundary */ 9457 /* read bytes upto the last 4 byte boundary */
@@ -10165,8 +9999,8 @@ static int tg3_set_rx_csum(struct net_device *dev, u32 data)
10165 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { 9999 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10166 if (data != 0) 10000 if (data != 0)
10167 return -EINVAL; 10001 return -EINVAL;
10168 return 0; 10002 return 0;
10169 } 10003 }
10170 10004
10171 spin_lock_bh(&tp->lock); 10005 spin_lock_bh(&tp->lock);
10172 if (data) 10006 if (data)
@@ -10185,8 +10019,8 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10185 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) { 10019 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
10186 if (data != 0) 10020 if (data != 0)
10187 return -EINVAL; 10021 return -EINVAL;
10188 return 0; 10022 return 0;
10189 } 10023 }
10190 10024
10191 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 10025 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
10192 ethtool_op_set_tx_ipv6_csum(dev, data); 10026 ethtool_op_set_tx_ipv6_csum(dev, data);
@@ -10196,7 +10030,7 @@ static int tg3_set_tx_csum(struct net_device *dev, u32 data)
10196 return 0; 10030 return 0;
10197} 10031}
10198 10032
10199static int tg3_get_sset_count (struct net_device *dev, int sset) 10033static int tg3_get_sset_count(struct net_device *dev, int sset)
10200{ 10034{
10201 switch (sset) { 10035 switch (sset) {
10202 case ETH_SS_TEST: 10036 case ETH_SS_TEST:
@@ -10208,7 +10042,7 @@ static int tg3_get_sset_count (struct net_device *dev, int sset)
10208 } 10042 }
10209} 10043}
10210 10044
10211static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf) 10045static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10212{ 10046{
10213 switch (stringset) { 10047 switch (stringset) {
10214 case ETH_SS_STATS: 10048 case ETH_SS_STATS:
@@ -10255,7 +10089,7 @@ static int tg3_phys_id(struct net_device *dev, u32 data)
10255 return 0; 10089 return 0;
10256} 10090}
10257 10091
10258static void tg3_get_ethtool_stats (struct net_device *dev, 10092static void tg3_get_ethtool_stats(struct net_device *dev,
10259 struct ethtool_stats *estats, u64 *tmp_stats) 10093 struct ethtool_stats *estats, u64 *tmp_stats)
10260{ 10094{
10261 struct tg3 *tp = netdev_priv(dev); 10095 struct tg3 *tp = netdev_priv(dev);
@@ -10361,8 +10195,7 @@ static int tg3_test_nvram(struct tg3 *tp)
10361 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1) 10195 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
10362 parity[k++] = buf8[i] & msk; 10196 parity[k++] = buf8[i] & msk;
10363 i++; 10197 i++;
10364 } 10198 } else if (i == 16) {
10365 else if (i == 16) {
10366 int l; 10199 int l;
10367 u8 msk; 10200 u8 msk;
10368 10201
@@ -10460,7 +10293,7 @@ static int tg3_test_registers(struct tg3 *tp)
10460 { MAC_ADDR_0_HIGH, 0x0000, 10293 { MAC_ADDR_0_HIGH, 0x0000,
10461 0x00000000, 0x0000ffff }, 10294 0x00000000, 0x0000ffff },
10462 { MAC_ADDR_0_LOW, 0x0000, 10295 { MAC_ADDR_0_LOW, 0x0000,
10463 0x00000000, 0xffffffff }, 10296 0x00000000, 0xffffffff },
10464 { MAC_RX_MTU_SIZE, 0x0000, 10297 { MAC_RX_MTU_SIZE, 0x0000,
10465 0x00000000, 0x0000ffff }, 10298 0x00000000, 0x0000ffff },
10466 { MAC_TX_MODE, 0x0000, 10299 { MAC_TX_MODE, 0x0000,
@@ -10648,7 +10481,8 @@ static int tg3_test_registers(struct tg3 *tp)
10648 10481
10649out: 10482out:
10650 if (netif_msg_hw(tp)) 10483 if (netif_msg_hw(tp))
10651 pr_err("Register test failed at offset %x\n", offset); 10484 netdev_err(tp->dev,
10485 "Register test failed at offset %x\n", offset);
10652 tw32(offset, save_val); 10486 tw32(offset, save_val);
10653 return -EIO; 10487 return -EIO;
10654} 10488}
@@ -10824,9 +10658,9 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10824 MII_TG3_EXT_CTRL_LNK3_LED_MODE); 10658 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10825 } 10659 }
10826 tw32(MAC_MODE, mac_mode); 10660 tw32(MAC_MODE, mac_mode);
10827 } 10661 } else {
10828 else
10829 return -EINVAL; 10662 return -EINVAL;
10663 }
10830 10664
10831 err = -EIO; 10665 err = -EIO;
10832 10666
@@ -10908,7 +10742,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10908 10742
10909 rx_skb = tpr->rx_std_buffers[desc_idx].skb; 10743 rx_skb = tpr->rx_std_buffers[desc_idx].skb;
10910 10744
10911 map = pci_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping); 10745 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx], mapping);
10912 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE); 10746 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10913 10747
10914 for (i = 14; i < tx_len; i++) { 10748 for (i = 14; i < tx_len; i++) {
@@ -11082,7 +10916,7 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11082 return phy_mii_ioctl(phydev, data, cmd); 10916 return phy_mii_ioctl(phydev, data, cmd);
11083 } 10917 }
11084 10918
11085 switch(cmd) { 10919 switch (cmd) {
11086 case SIOCGMIIPHY: 10920 case SIOCGMIIPHY:
11087 data->phy_id = tp->phy_addr; 10921 data->phy_id = tp->phy_addr;
11088 10922
@@ -11775,7 +11609,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
11775 tp->tg3_flags |= TG3_FLAG_NVRAM; 11609 tp->tg3_flags |= TG3_FLAG_NVRAM;
11776 11610
11777 if (tg3_nvram_lock(tp)) { 11611 if (tg3_nvram_lock(tp)) {
11778 netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n", 11612 netdev_warn(tp->dev,
11613 "Cannot get nvram lock, %s failed\n",
11779 __func__); 11614 __func__);
11780 return; 11615 return;
11781 } 11616 }
@@ -11894,7 +11729,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11894 if (ret) 11729 if (ret)
11895 break; 11730 break;
11896 11731
11897 page_off = offset & pagemask; 11732 page_off = offset & pagemask;
11898 size = pagesize; 11733 size = pagesize;
11899 if (len < size) 11734 if (len < size)
11900 size = len; 11735 size = len;
@@ -11922,7 +11757,7 @@ static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11922 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR | 11757 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11923 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE; 11758 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11924 11759
11925 if (tg3_nvram_exec_cmd(tp, nvram_cmd)) 11760 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11926 break; 11761 break;
11927 11762
11928 /* Issue another write enable to start the write. */ 11763 /* Issue another write enable to start the write. */
@@ -11976,7 +11811,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11976 memcpy(&data, buf + i, 4); 11811 memcpy(&data, buf + i, 4);
11977 tw32(NVRAM_WRDATA, be32_to_cpu(data)); 11812 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11978 11813
11979 page_off = offset % tp->nvram_pagesize; 11814 page_off = offset % tp->nvram_pagesize;
11980 11815
11981 phy_addr = tg3_nvram_phys_addr(tp, offset); 11816 phy_addr = tg3_nvram_phys_addr(tp, offset);
11982 11817
@@ -11984,7 +11819,7 @@ static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11984 11819
11985 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR; 11820 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
11986 11821
11987 if ((page_off == 0) || (i == 0)) 11822 if (page_off == 0 || i == 0)
11988 nvram_cmd |= NVRAM_CMD_FIRST; 11823 nvram_cmd |= NVRAM_CMD_FIRST;
11989 if (page_off == (tp->nvram_pagesize - 4)) 11824 if (page_off == (tp->nvram_pagesize - 4))
11990 nvram_cmd |= NVRAM_CMD_LAST; 11825 nvram_cmd |= NVRAM_CMD_LAST;
@@ -12027,8 +11862,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12027 11862
12028 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) { 11863 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
12029 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf); 11864 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12030 } 11865 } else {
12031 else {
12032 u32 grc_mode; 11866 u32 grc_mode;
12033 11867
12034 ret = tg3_nvram_lock(tp); 11868 ret = tg3_nvram_lock(tp);
@@ -12048,8 +11882,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12048 11882
12049 ret = tg3_nvram_write_block_buffered(tp, offset, len, 11883 ret = tg3_nvram_write_block_buffered(tp, offset, len,
12050 buf); 11884 buf);
12051 } 11885 } else {
12052 else {
12053 ret = tg3_nvram_write_block_unbuffered(tp, offset, len, 11886 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12054 buf); 11887 buf);
12055 } 11888 }
@@ -12544,11 +12377,11 @@ skip_phy_reset:
12544 return err; 12377 return err;
12545} 12378}
12546 12379
12547static void __devinit tg3_read_partno(struct tg3 *tp) 12380static void __devinit tg3_read_vpd(struct tg3 *tp)
12548{ 12381{
12549 unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */ 12382 u8 vpd_data[TG3_NVM_VPD_LEN];
12550 unsigned int block_end, rosize, len; 12383 unsigned int block_end, rosize, len;
12551 int i = 0; 12384 int j, i = 0;
12552 u32 magic; 12385 u32 magic;
12553 12386
12554 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) || 12387 if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -12597,6 +12430,32 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
12597 if (block_end > TG3_NVM_VPD_LEN) 12430 if (block_end > TG3_NVM_VPD_LEN)
12598 goto out_not_found; 12431 goto out_not_found;
12599 12432
12433 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12434 PCI_VPD_RO_KEYWORD_MFR_ID);
12435 if (j > 0) {
12436 len = pci_vpd_info_field_size(&vpd_data[j]);
12437
12438 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12439 if (j + len > block_end || len != 4 ||
12440 memcmp(&vpd_data[j], "1028", 4))
12441 goto partno;
12442
12443 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12444 PCI_VPD_RO_KEYWORD_VENDOR0);
12445 if (j < 0)
12446 goto partno;
12447
12448 len = pci_vpd_info_field_size(&vpd_data[j]);
12449
12450 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12451 if (j + len > block_end)
12452 goto partno;
12453
12454 memcpy(tp->fw_ver, &vpd_data[j], len);
12455 strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
12456 }
12457
12458partno:
12600 i = pci_vpd_find_info_keyword(vpd_data, i, rosize, 12459 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
12601 PCI_VPD_RO_KEYWORD_PARTNO); 12460 PCI_VPD_RO_KEYWORD_PARTNO);
12602 if (i < 0) 12461 if (i < 0)
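tg3_read_partno() becomes tg3_read_vpd() and now mines two extra read-only keywords before the part number: a manufacturer ID of "1028" (Dell's ID in ASCII) gates a VENDOR0 lookup whose value is copied into tp->fw_ver. Both lookups ride on the PCI 2.2 VPD field layout, where each info field is a 2-byte keyword, a 1-byte length, then the data. A self-contained sketch of that walk under the same layout assumption; vpd_find_keyword and the sample buffer are illustrative, not the kernel's pci_vpd_* helpers:

#include <stdio.h>
#include <string.h>

#define VPD_FLD_HDR 3   /* matches PCI_VPD_INFO_FLD_HDR_SIZE: kw[2] + len[1] */

static int vpd_find_keyword(const unsigned char *buf, int start, int rosize,
                            const char *kw)
{
        int i = start;

        while (i + VPD_FLD_HDR <= start + rosize) {
                if (buf[i] == kw[0] && buf[i + 1] == kw[1])
                        return i;
                i += VPD_FLD_HDR + buf[i + 2];   /* skip header + data */
        }
        return -1;
}

int main(void)
{
        /* "MN" (mfr id) = "1028", then "V0" (vendor field) = "5.2.2" */
        const unsigned char ro[] = "MN\x04" "1028" "V0\x05" "5.2.2";
        int j = vpd_find_keyword(ro, 0, sizeof(ro) - 1, "V0");

        if (j >= 0)
                printf("V0 len=%d data=%.*s\n", ro[j + 2], ro[j + 2],
                       &ro[j + VPD_FLD_HDR]);
        return 0;
}

The bounds checks in the hunk (j + len against block_end) are the same guard this sketch's loop condition provides: never trust the length byte to stay inside the read-only block.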
@@ -12666,7 +12525,7 @@ static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
12666static void __devinit tg3_read_bc_ver(struct tg3 *tp) 12525static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12667{ 12526{
12668 u32 val, offset, start, ver_offset; 12527 u32 val, offset, start, ver_offset;
12669 int i; 12528 int i, dst_off;
12670 bool newver = false; 12529 bool newver = false;
12671 12530
12672 if (tg3_nvram_read(tp, 0xc, &offset) || 12531 if (tg3_nvram_read(tp, 0xc, &offset) ||
@@ -12686,8 +12545,11 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12686 newver = true; 12545 newver = true;
12687 } 12546 }
12688 12547
12548 dst_off = strlen(tp->fw_ver);
12549
12689 if (newver) { 12550 if (newver) {
12690 if (tg3_nvram_read(tp, offset + 8, &ver_offset)) 12551 if (TG3_VER_SIZE - dst_off < 16 ||
12552 tg3_nvram_read(tp, offset + 8, &ver_offset))
12691 return; 12553 return;
12692 12554
12693 offset = offset + ver_offset - start; 12555 offset = offset + ver_offset - start;
@@ -12696,7 +12558,7 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12696 if (tg3_nvram_read_be32(tp, offset + i, &v)) 12558 if (tg3_nvram_read_be32(tp, offset + i, &v))
12697 return; 12559 return;
12698 12560
12699 memcpy(tp->fw_ver + i, &v, sizeof(v)); 12561 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
12700 } 12562 }
12701 } else { 12563 } else {
12702 u32 major, minor; 12564 u32 major, minor;
@@ -12707,7 +12569,8 @@ static void __devinit tg3_read_bc_ver(struct tg3 *tp)
12707 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >> 12569 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
12708 TG3_NVM_BCVER_MAJSFT; 12570 TG3_NVM_BCVER_MAJSFT;
12709 minor = ver_offset & TG3_NVM_BCVER_MINMSK; 12571 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
12710 snprintf(&tp->fw_ver[0], 32, "v%d.%02d", major, minor); 12572 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
12573 "v%d.%02d", major, minor);
12711 } 12574 }
12712} 12575}
12713 12576
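The bootcode-version reader no longer assumes it owns the start of tp->fw_ver: it appends at strlen(tp->fw_ver) (dst_off) and bails out unless at least 16 bytes of the TG3_VER_SIZE budget remain for the copy loop, so a VPD-supplied version string survives in front of it. The same bounded-append pattern in a runnable userspace sketch, with VER_SIZE, append_ver and the sample strings as illustrative stand-ins:

#include <stdio.h>
#include <string.h>

#define VER_SIZE 32   /* mirrors TG3_VER_SIZE */

static void append_ver(char *buf, const char *fmt, int major, int minor)
{
        size_t off = strlen(buf);

        if (off >= VER_SIZE - 1)
                return;   /* no room left, keep what we already have */
        snprintf(buf + off, VER_SIZE - off, fmt, major, minor);
}

int main(void)
{
        char fw_ver[VER_SIZE] = "5.2.2 bc ";   /* VPD part went in first */

        append_ver(fw_ver, "v%d.%02d", 3, 4);
        printf("%s\n", fw_ver);   /* -> "5.2.2 bc v3.04" */
        return 0;
}

Every writer measuring strlen() before appending is what lets the VPD, bootcode and selfboot readers share one fixed buffer safely.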
@@ -12731,9 +12594,7 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12731{ 12594{
12732 u32 offset, major, minor, build; 12595 u32 offset, major, minor, build;
12733 12596
12734 tp->fw_ver[0] = 's'; 12597 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
12735 tp->fw_ver[1] = 'b';
12736 tp->fw_ver[2] = '\0';
12737 12598
12738 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1) 12599 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
12739 return; 12600 return;
@@ -12770,11 +12631,14 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
12770 if (minor > 99 || build > 26) 12631 if (minor > 99 || build > 26)
12771 return; 12632 return;
12772 12633
12773 snprintf(&tp->fw_ver[2], 30, " v%d.%02d", major, minor); 12634 offset = strlen(tp->fw_ver);
12635 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
12636 " v%d.%02d", major, minor);
12774 12637
12775 if (build > 0) { 12638 if (build > 0) {
12776 tp->fw_ver[8] = 'a' + build - 1; 12639 offset = strlen(tp->fw_ver);
12777 tp->fw_ver[9] = '\0'; 12640 if (offset < TG3_VER_SIZE - 1)
12641 tp->fw_ver[offset] = 'a' + build - 1;
12778 } 12642 }
12779} 12643}
12780 12644
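tg3_read_sb_ver() gets the same treatment, swapping hand-rolled byte stores for strncat() with a computed budget. The budget math matters: strncat's bound counts bytes to append, not total size, hence TG3_VER_SIZE - strlen(tp->fw_ver) - 1 to leave room for the terminating NUL. A quick demonstration of the truncation behaviour, with the buffer shrunk to 8 bytes to force it (values illustrative):

#include <stdio.h>
#include <string.h>

int main(void)
{
        char dst[8] = "sb";

        /* room left: 8 - strlen("sb") - 1 = 5 bytes to append */
        strncat(dst, " v1.39", sizeof(dst) - strlen(dst) - 1);
        printf("%s\n", dst);   /* "sb v1.3" -- truncated, never overflows */
        return 0;
}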
@@ -12861,12 +12725,13 @@ static void __devinit tg3_read_dash_ver(struct tg3 *tp)
12861static void __devinit tg3_read_fw_ver(struct tg3 *tp) 12725static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12862{ 12726{
12863 u32 val; 12727 u32 val;
12728 bool vpd_vers = false;
12864 12729
12865 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) { 12730 if (tp->fw_ver[0] != 0)
12866 tp->fw_ver[0] = 's'; 12731 vpd_vers = true;
12867 tp->fw_ver[1] = 'b';
12868 tp->fw_ver[2] = '\0';
12869 12732
12733 if (tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) {
12734 strcat(tp->fw_ver, "sb");
12870 return; 12735 return;
12871 } 12736 }
12872 12737
@@ -12883,11 +12748,12 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
12883 return; 12748 return;
12884 12749
12885 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) || 12750 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
12886 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) 12751 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) || vpd_vers)
12887 return; 12752 goto done;
12888 12753
12889 tg3_read_mgmtfw_ver(tp); 12754 tg3_read_mgmtfw_ver(tp);
12890 12755
12756done:
12891 tp->fw_ver[TG3_VER_SIZE - 1] = 0; 12757 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
12892} 12758}
12893 12759
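tg3_read_fw_ver() now treats a non-empty tp->fw_ver as proof that VPD already supplied a version (vpd_vers), skips the management-firmware probe in that case, and funnels every exit through the NUL-terminating store via the new done: label. Schematically, in a hedged standalone model where have_mgmt_fw, the " mgmt 1.2" string and read_fw_ver are all illustrative:

#include <stdio.h>
#include <string.h>

#define VER_SIZE 32

static void read_fw_ver(char *fw_ver, int have_mgmt_fw)
{
        int vpd_vers = fw_ver[0] != 0;   /* VPD beat us to it? */

        if (!vpd_vers && have_mgmt_fw)
                strncat(fw_ver, " mgmt 1.2", VER_SIZE - strlen(fw_ver) - 1);

        fw_ver[VER_SIZE - 1] = 0;   /* always terminate, as at the done: label */
}

int main(void)
{
        char a[VER_SIZE] = "5.2.2 bc v3.04";   /* came from VPD */
        char b[VER_SIZE] = "";

        read_fw_ver(a, 1);   /* untouched: VPD version wins */
        read_fw_ver(b, 1);   /* falls back to the firmware probe */
        printf("a=\"%s\"\nb=\"%s\"\n", a, b);
        return 0;
}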
@@ -12897,9 +12763,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12897{ 12763{
12898 static struct pci_device_id write_reorder_chipsets[] = { 12764 static struct pci_device_id write_reorder_chipsets[] = {
12899 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 12765 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12900 PCI_DEVICE_ID_AMD_FE_GATE_700C) }, 12766 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
12901 { PCI_DEVICE(PCI_VENDOR_ID_AMD, 12767 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
12902 PCI_DEVICE_ID_AMD_8131_BRIDGE) }, 12768 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
12903 { PCI_DEVICE(PCI_VENDOR_ID_VIA, 12769 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
12904 PCI_DEVICE_ID_VIA_8385_0) }, 12770 PCI_DEVICE_ID_VIA_8385_0) },
12905 { }, 12771 { },
@@ -13065,8 +12931,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13065 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS; 12931 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
13066 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG; 12932 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
13067 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI); 12933 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13068 } 12934 } else {
13069 else {
13070 struct pci_dev *bridge = NULL; 12935 struct pci_dev *bridge = NULL;
13071 12936
13072 do { 12937 do {
@@ -13128,6 +12993,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13128 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG; 12993 tp->dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13129 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 12994 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
13130 tp->dev->features |= NETIF_F_IPV6_CSUM; 12995 tp->dev->features |= NETIF_F_IPV6_CSUM;
12996 tp->dev->features |= NETIF_F_GRO;
13131 } 12997 }
13132 12998
13133 /* Determine TSO capabilities */ 12999 /* Determine TSO capabilities */
@@ -13188,8 +13054,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13188 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG; 13054 tp->tg3_flags3 |= TG3_FLG3_USE_JUMBO_BDFLAG;
13189 13055
13190 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 13056 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
13191 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 13057 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
13192 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG)) 13058 (tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG))
13193 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE; 13059 tp->tg3_flags |= TG3_FLAG_JUMBO_CAPABLE;
13194 13060
13195 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, 13061 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
@@ -13223,7 +13089,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13223 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) { 13089 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
13224 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX); 13090 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13225 if (!tp->pcix_cap) { 13091 if (!tp->pcix_cap) {
13226 pr_err("Cannot find PCI-X capability, aborting\n"); 13092 dev_err(&tp->pdev->dev,
13093 "Cannot find PCI-X capability, aborting\n");
13227 return -EIO; 13094 return -EIO;
13228 } 13095 }
13229 13096
@@ -13420,7 +13287,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13420 /* Force the chip into D0. */ 13287 /* Force the chip into D0. */
13421 err = tg3_set_power_state(tp, PCI_D0); 13288 err = tg3_set_power_state(tp, PCI_D0);
13422 if (err) { 13289 if (err) {
13423 pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev)); 13290 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13424 return err; 13291 return err;
13425 } 13292 }
13426 13293
@@ -13594,13 +13461,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13594 13461
13595 err = tg3_phy_probe(tp); 13462 err = tg3_phy_probe(tp);
13596 if (err) { 13463 if (err) {
13597 pr_err("(%s) phy probe failed, err %d\n", 13464 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
13598 pci_name(tp->pdev), err);
13599 /* ... but do not return immediately ... */ 13465 /* ... but do not return immediately ... */
13600 tg3_mdio_fini(tp); 13466 tg3_mdio_fini(tp);
13601 } 13467 }
13602 13468
13603 tg3_read_partno(tp); 13469 tg3_read_vpd(tp);
13604 tg3_read_fw_ver(tp); 13470 tg3_read_fw_ver(tp);
13605 13471
13606 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) { 13472 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
@@ -13638,10 +13504,15 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13638 else 13504 else
13639 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES; 13505 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
13640 13506
13641 tp->rx_offset = NET_IP_ALIGN; 13507 tp->rx_offset = NET_IP_ALIGN + TG3_RX_HEADROOM;
13508 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
13642 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 && 13509 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
13643 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) 13510 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
13644 tp->rx_offset = 0; 13511 tp->rx_offset -= NET_IP_ALIGN;
13512#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
13513 tp->rx_copy_thresh = ~(u16)0;
13514#endif
13515 }
13645 13516
13646 tp->rx_std_max_post = TG3_RX_RING_SIZE; 13517 tp->rx_std_max_post = TG3_RX_RING_SIZE;
13647 13518
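tp->rx_offset grows TG3_RX_HEADROOM and a companion rx_copy_thresh appears: frames at or below the threshold are copied into a fresh skb so the large mapped buffer can be recycled, while on 5701-in-PCI-X mode, where the 2-byte NET_IP_ALIGN shift is unavailable, the threshold is raised to ~(u16)0 so everything is copied and the IP header still lands aligned on architectures without efficient unaligned access. The decision itself, modelled standalone (rx_should_copy and the sample lengths are illustrative, not tg3 code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool rx_should_copy(uint16_t len, uint16_t copy_thresh)
{
        return len < copy_thresh;   /* short frame: copy, recycle buffer */
}

int main(void)
{
        uint16_t thresh = 256;   /* TG3_RX_COPY_THRESHOLD-like value */

        printf("64B:   %s\n", rx_should_copy(64, thresh) ? "copy" : "flip");
        printf("1500B: %s\n", rx_should_copy(1500, thresh) ? "copy" : "flip");

        thresh = (uint16_t)~0;   /* no efficient unaligned access: copy all */
        printf("1500B: %s\n", rx_should_copy(1500, thresh) ? "copy" : "flip");
        return 0;
}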
@@ -13964,11 +13835,10 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
13964 } 13835 }
13965 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0); 13836 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
13966 13837
13967 if (to_device) { 13838 if (to_device)
13968 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs); 13839 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
13969 } else { 13840 else
13970 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs); 13841 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
13971 }
13972 13842
13973 ret = -ENODEV; 13843 ret = -ENODEV;
13974 for (i = 0; i < 40; i++) { 13844 for (i = 0; i < 40; i++) {
@@ -14104,8 +13974,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14104 /* Send the buffer to the chip. */ 13974 /* Send the buffer to the chip. */
14105 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1); 13975 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
14106 if (ret) { 13976 if (ret) {
14107 pr_err("tg3_test_dma() Write the buffer failed %d\n", 13977 dev_err(&tp->pdev->dev,
14108 ret); 13978 "%s: Buffer write failed. err = %d\n",
13979 __func__, ret);
14109 break; 13980 break;
14110 } 13981 }
14111 13982
@@ -14115,8 +13986,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14115 u32 val; 13986 u32 val;
14116 tg3_read_mem(tp, 0x2100 + (i*4), &val); 13987 tg3_read_mem(tp, 0x2100 + (i*4), &val);
14117 if (le32_to_cpu(val) != p[i]) { 13988 if (le32_to_cpu(val) != p[i]) {
14118 pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", 13989 dev_err(&tp->pdev->dev,
14119 val, i); 13990 "%s: Buffer corrupted on device! "
13991 "(%d != %d)\n", __func__, val, i);
14120 /* ret = -ENODEV here? */ 13992 /* ret = -ENODEV here? */
14121 } 13993 }
14122 p[i] = 0; 13994 p[i] = 0;
@@ -14125,9 +13997,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14125 /* Now read it back. */ 13997 /* Now read it back. */
14126 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0); 13998 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
14127 if (ret) { 13999 if (ret) {
14128 pr_err("tg3_test_dma() Read the buffer failed %d\n", 14000 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
14129 ret); 14001 "err = %d\n", __func__, ret);
14130
14131 break; 14002 break;
14132 } 14003 }
14133 14004
@@ -14143,8 +14014,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14143 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 14014 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14144 break; 14015 break;
14145 } else { 14016 } else {
14146 pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", 14017 dev_err(&tp->pdev->dev,
14147 p[i], i); 14018 "%s: Buffer corrupted on read back! "
14019 "(%d != %d)\n", __func__, p[i], i);
14148 ret = -ENODEV; 14020 ret = -ENODEV;
14149 goto out; 14021 goto out;
14150 } 14022 }
@@ -14171,10 +14043,10 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14171 if (pci_dev_present(dma_wait_state_chipsets)) { 14043 if (pci_dev_present(dma_wait_state_chipsets)) {
14172 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK; 14044 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
14173 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16; 14045 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
14174 } 14046 } else {
14175 else
14176 /* Safe to use the calculated DMA boundary. */ 14047 /* Safe to use the calculated DMA boundary. */
14177 tp->dma_rwctrl = saved_dma_rwctrl; 14048 tp->dma_rwctrl = saved_dma_rwctrl;
14049 }
14178 14050
14179 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl); 14051 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
14180 } 14052 }
@@ -14436,13 +14308,13 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14436 14308
14437 err = pci_enable_device(pdev); 14309 err = pci_enable_device(pdev);
14438 if (err) { 14310 if (err) {
14439 pr_err("Cannot enable PCI device, aborting\n"); 14311 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14440 return err; 14312 return err;
14441 } 14313 }
14442 14314
14443 err = pci_request_regions(pdev, DRV_MODULE_NAME); 14315 err = pci_request_regions(pdev, DRV_MODULE_NAME);
14444 if (err) { 14316 if (err) {
14445 pr_err("Cannot obtain PCI resources, aborting\n"); 14317 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14446 goto err_out_disable_pdev; 14318 goto err_out_disable_pdev;
14447 } 14319 }
14448 14320
@@ -14451,14 +14323,15 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14451 /* Find power-management capability. */ 14323 /* Find power-management capability. */
14452 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 14324 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14453 if (pm_cap == 0) { 14325 if (pm_cap == 0) {
14454 pr_err("Cannot find PowerManagement capability, aborting\n"); 14326 dev_err(&pdev->dev,
14327 "Cannot find Power Management capability, aborting\n");
14455 err = -EIO; 14328 err = -EIO;
14456 goto err_out_free_res; 14329 goto err_out_free_res;
14457 } 14330 }
14458 14331
14459 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS); 14332 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14460 if (!dev) { 14333 if (!dev) {
14461 pr_err("Etherdev alloc failed, aborting\n"); 14334 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14462 err = -ENOMEM; 14335 err = -ENOMEM;
14463 goto err_out_free_res; 14336 goto err_out_free_res;
14464 } 14337 }
@@ -14508,7 +14381,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14508 14381
14509 tp->regs = pci_ioremap_bar(pdev, BAR_0); 14382 tp->regs = pci_ioremap_bar(pdev, BAR_0);
14510 if (!tp->regs) { 14383 if (!tp->regs) {
14511 netdev_err(dev, "Cannot map device registers, aborting\n"); 14384 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
14512 err = -ENOMEM; 14385 err = -ENOMEM;
14513 goto err_out_free_dev; 14386 goto err_out_free_dev;
14514 } 14387 }
@@ -14524,7 +14397,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14524 14397
14525 err = tg3_get_invariants(tp); 14398 err = tg3_get_invariants(tp);
14526 if (err) { 14399 if (err) {
14527 netdev_err(dev, "Problem fetching invariants of chip, aborting\n"); 14400 dev_err(&pdev->dev,
14401 "Problem fetching invariants of chip, aborting\n");
14528 goto err_out_iounmap; 14402 goto err_out_iounmap;
14529 } 14403 }
14530 14404
@@ -14559,7 +14433,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14559 err = pci_set_consistent_dma_mask(pdev, 14433 err = pci_set_consistent_dma_mask(pdev,
14560 persist_dma_mask); 14434 persist_dma_mask);
14561 if (err < 0) { 14435 if (err < 0) {
14562 netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n"); 14436 dev_err(&pdev->dev, "Unable to obtain 64 bit "
14437 "DMA for consistent allocations\n");
14563 goto err_out_iounmap; 14438 goto err_out_iounmap;
14564 } 14439 }
14565 } 14440 }
@@ -14567,7 +14442,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14567 if (err || dma_mask == DMA_BIT_MASK(32)) { 14442 if (err || dma_mask == DMA_BIT_MASK(32)) {
14568 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 14443 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
14569 if (err) { 14444 if (err) {
14570 netdev_err(dev, "No usable DMA configuration, aborting\n"); 14445 dev_err(&pdev->dev,
14446 "No usable DMA configuration, aborting\n");
14571 goto err_out_iounmap; 14447 goto err_out_iounmap;
14572 } 14448 }
14573 } 14449 }
@@ -14616,14 +14492,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14616 14492
14617 err = tg3_get_device_address(tp); 14493 err = tg3_get_device_address(tp);
14618 if (err) { 14494 if (err) {
14619 netdev_err(dev, "Could not obtain valid ethernet address, aborting\n"); 14495 dev_err(&pdev->dev,
14496 "Could not obtain valid ethernet address, aborting\n");
14620 goto err_out_iounmap; 14497 goto err_out_iounmap;
14621 } 14498 }
14622 14499
14623 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 14500 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
14624 tp->aperegs = pci_ioremap_bar(pdev, BAR_2); 14501 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
14625 if (!tp->aperegs) { 14502 if (!tp->aperegs) {
14626 netdev_err(dev, "Cannot map APE registers, aborting\n"); 14503 dev_err(&pdev->dev,
14504 "Cannot map APE registers, aborting\n");
14627 err = -ENOMEM; 14505 err = -ENOMEM;
14628 goto err_out_iounmap; 14506 goto err_out_iounmap;
14629 } 14507 }
@@ -14647,7 +14525,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14647 14525
14648 err = tg3_test_dma(tp); 14526 err = tg3_test_dma(tp);
14649 if (err) { 14527 if (err) {
14650 netdev_err(dev, "DMA engine test failed, aborting\n"); 14528 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
14651 goto err_out_apeunmap; 14529 goto err_out_apeunmap;
14652 } 14530 }
14653 14531
@@ -14708,7 +14586,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14708 14586
14709 err = register_netdev(dev); 14587 err = register_netdev(dev);
14710 if (err) { 14588 if (err) {
14711 netdev_err(dev, "Cannot register net device, aborting\n"); 14589 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
14712 goto err_out_apeunmap; 14590 goto err_out_apeunmap;
14713 } 14591 }
14714 14592
@@ -14721,11 +14599,12 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14721 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 14599 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14722 struct phy_device *phydev; 14600 struct phy_device *phydev;
14723 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]; 14601 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14724 netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 14602 netdev_info(dev,
14603 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14725 phydev->drv->name, dev_name(&phydev->dev)); 14604 phydev->drv->name, dev_name(&phydev->dev));
14726 } else 14605 } else
14727 netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", 14606 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
14728 tg3_phy_string(tp), 14607 "(WireSpeed[%d])\n", tg3_phy_string(tp),
14729 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" : 14608 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
14730 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" : 14609 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
14731 "10/100/1000Base-T")), 14610 "10/100/1000Base-T")),
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 574a1cc4d353..ce9c4918c318 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -23,11 +23,8 @@
23#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */ 23#define TG3_BDINFO_NIC_ADDR 0xcUL /* 32-bit */
24#define TG3_BDINFO_SIZE 0x10UL 24#define TG3_BDINFO_SIZE 0x10UL
25 25
26#define RX_COPY_THRESHOLD 256
27
28#define TG3_RX_INTERNAL_RING_SZ_5906 32 26#define TG3_RX_INTERNAL_RING_SZ_5906 32
29 27
30#define RX_STD_MAX_SIZE 1536
31#define RX_STD_MAX_SIZE_5705 512 28#define RX_STD_MAX_SIZE_5705 512
32#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */ 29#define RX_JUMBO_MAX_SIZE 0xdeadbeef /* XXX */
33 30
@@ -183,6 +180,7 @@
183#define METAL_REV_B2 0x02 180#define METAL_REV_B2 0x02
184#define TG3PCI_DMA_RW_CTRL 0x0000006c 181#define TG3PCI_DMA_RW_CTRL 0x0000006c
185#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001 182#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
183#define DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK 0x00000380
186#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 184#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
187#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 185#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
188#define DMA_RWCTRL_READ_BNDRY_16 0x00000100 186#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
@@ -252,7 +250,7 @@
252/* 0x94 --> 0x98 unused */ 250/* 0x94 --> 0x98 unused */
253#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */ 251#define TG3PCI_STD_RING_PROD_IDX 0x00000098 /* 64-bit */
254#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */ 252#define TG3PCI_RCV_RET_RING_CON_IDX 0x000000a0 /* 64-bit */
255/* 0xa0 --> 0xb8 unused */ 253/* 0xa8 --> 0xb8 unused */
256#define TG3PCI_DUAL_MAC_CTRL 0x000000b8 254#define TG3PCI_DUAL_MAC_CTRL 0x000000b8
257#define DUAL_MAC_CTRL_CH_MASK 0x00000003 255#define DUAL_MAC_CTRL_CH_MASK 0x00000003
258#define DUAL_MAC_CTRL_ID 0x00000004 256#define DUAL_MAC_CTRL_ID 0x00000004
@@ -1854,6 +1852,8 @@
1854#define TG3_PCIE_TLDLPL_PORT 0x00007c00 1852#define TG3_PCIE_TLDLPL_PORT 0x00007c00
1855#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004 1853#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
1856#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000 1854#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
1855#define TG3_PCIE_PL_LO_PHYCTL5 0x00000014
1856#define TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ 0x80000000
1857 1857
1858/* OTP bit definitions */ 1858/* OTP bit definitions */
1859#define TG3_OTP_AGCTGT_MASK 0x000000e0 1859#define TG3_OTP_AGCTGT_MASK 0x000000e0
@@ -2082,7 +2082,7 @@
2082#define MII_TG3_DSP_AADJ1CH0 0x001f 2082#define MII_TG3_DSP_AADJ1CH0 0x001f
2083#define MII_TG3_DSP_AADJ1CH3 0x601f 2083#define MII_TG3_DSP_AADJ1CH3 0x601f
2084#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002 2084#define MII_TG3_DSP_AADJ1CH3_ADCCKADJ 0x0002
2085#define MII_TG3_DSP_EXP8 0x0708 2085#define MII_TG3_DSP_EXP8 0x0f08
2086#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001 2086#define MII_TG3_DSP_EXP8_REJ2MHz 0x0001
2087#define MII_TG3_DSP_EXP8_AEDW 0x0200 2087#define MII_TG3_DSP_EXP8_AEDW 0x0200
2088#define MII_TG3_DSP_EXP75 0x0f75 2088#define MII_TG3_DSP_EXP75 0x0f75
@@ -2512,7 +2512,7 @@ struct tg3_hw_stats {
2512 */ 2512 */
2513struct ring_info { 2513struct ring_info {
2514 struct sk_buff *skb; 2514 struct sk_buff *skb;
2515 DECLARE_PCI_UNMAP_ADDR(mapping) 2515 DEFINE_DMA_UNMAP_ADDR(mapping);
2516}; 2516};
2517 2517
2518struct tg3_config_info { 2518struct tg3_config_info {
@@ -2561,7 +2561,7 @@ struct tg3_bufmgr_config {
2561 2561
2562struct tg3_ethtool_stats { 2562struct tg3_ethtool_stats {
2563 /* Statistics maintained by Receive MAC. */ 2563 /* Statistics maintained by Receive MAC. */
2564 u64 rx_octets; 2564 u64 rx_octets;
2565 u64 rx_fragments; 2565 u64 rx_fragments;
2566 u64 rx_ucast_packets; 2566 u64 rx_ucast_packets;
2567 u64 rx_mcast_packets; 2567 u64 rx_mcast_packets;
@@ -2751,9 +2751,11 @@ struct tg3 {
2751 struct tg3_napi napi[TG3_IRQ_MAX_VECS]; 2751 struct tg3_napi napi[TG3_IRQ_MAX_VECS];
2752 void (*write32_rx_mbox) (struct tg3 *, u32, 2752 void (*write32_rx_mbox) (struct tg3 *, u32,
2753 u32); 2753 u32);
2754 u32 rx_copy_thresh;
2754 u32 rx_pending; 2755 u32 rx_pending;
2755 u32 rx_jumbo_pending; 2756 u32 rx_jumbo_pending;
2756 u32 rx_std_max_post; 2757 u32 rx_std_max_post;
2758 u32 rx_offset;
2757 u32 rx_pkt_map_sz; 2759 u32 rx_pkt_map_sz;
2758#if TG3_VLAN_TAG_USED 2760#if TG3_VLAN_TAG_USED
2759 struct vlan_group *vlgrp; 2761 struct vlan_group *vlgrp;
@@ -2773,7 +2775,6 @@ struct tg3 {
2773 unsigned long last_event_jiffies; 2775 unsigned long last_event_jiffies;
2774 }; 2776 };
2775 2777
2776 u32 rx_offset;
2777 u32 tg3_flags; 2778 u32 tg3_flags;
2778#define TG3_FLAG_TAGGED_STATUS 0x00000001 2779#define TG3_FLAG_TAGGED_STATUS 0x00000001
2779#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002 2780#define TG3_FLAG_TXD_MBOX_HWBUG 0x00000002
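In tg3.h, DECLARE_PCI_UNMAP_ADDR gives way to the generic DMA API's DEFINE_DMA_UNMAP_ADDR. The point of the macro pair is that the stored unmap address can be compiled out entirely on configurations that never need it, with the accessors collapsing to no-ops. A plain-C sketch of that conditional-field idea, where NEED_DMA_MAP_STATE and these macro bodies are illustrative stand-ins for the real kconfig-driven definitions:

#include <stdio.h>

#define NEED_DMA_MAP_STATE 1

#if NEED_DMA_MAP_STATE
#define DMA_UNMAP_ADDR(name)            unsigned long long name
#define dma_unmap_addr(p, name)         ((p)->name)
#define dma_unmap_addr_set(p, name, v)  (((p)->name) = (v))
#else
#define DMA_UNMAP_ADDR(name)            int name##_unused   /* field unused */
#define dma_unmap_addr(p, name)         (0)
#define dma_unmap_addr_set(p, name, v)  do { } while (0)
#endif

struct ring_info {
        void *skb;
        DMA_UNMAP_ADDR(mapping);
};

int main(void)
{
        struct ring_info ri = { 0 };

        dma_unmap_addr_set(&ri, mapping, 0xdeadbeefULL);
        printf("mapping=%llx\n", dma_unmap_addr(&ri, mapping));
        return 0;
}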
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 390540c101c7..8ffec22b74bf 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1314,7 +1314,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
1314 1314
1315static void TLan_SetMulticastList( struct net_device *dev ) 1315static void TLan_SetMulticastList( struct net_device *dev )
1316{ 1316{
1317 struct dev_mc_list *dmi; 1317 struct netdev_hw_addr *ha;
1318 u32 hash1 = 0; 1318 u32 hash1 = 0;
1319 u32 hash2 = 0; 1319 u32 hash2 = 0;
1320 int i; 1320 int i;
@@ -1336,12 +1336,12 @@ static void TLan_SetMulticastList( struct net_device *dev )
1336 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF ); 1336 TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
1337 } else { 1337 } else {
1338 i = 0; 1338 i = 0;
1339 netdev_for_each_mc_addr(dmi, dev) { 1339 netdev_for_each_mc_addr(ha, dev) {
1340 if ( i < 3 ) { 1340 if ( i < 3 ) {
1341 TLan_SetMac( dev, i + 1, 1341 TLan_SetMac( dev, i + 1,
1342 (char *) &dmi->dmi_addr ); 1342 (char *) &ha->addr);
1343 } else { 1343 } else {
1344 offset = TLan_HashFunc( (u8 *) &dmi->dmi_addr ); 1344 offset = TLan_HashFunc((u8 *)&ha->addr);
1345 if ( offset < 32 ) 1345 if ( offset < 32 )
1346 hash1 |= ( 1 << offset ); 1346 hash1 |= ( 1 << offset );
1347 else 1347 else
@@ -2464,7 +2464,7 @@ static void TLan_PhyPrint( struct net_device *dev )
2464 printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name ); 2464 printk( "TLAN: Device %s, Unmanaged PHY.\n", dev->name );
2465 } else if ( phy <= TLAN_PHY_MAX_ADDR ) { 2465 } else if ( phy <= TLAN_PHY_MAX_ADDR ) {
2466 printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy ); 2466 printk( "TLAN: Device %s, PHY 0x%02x.\n", dev->name, phy );
2467 printk( "TLAN: Off. +0 +1 +2 +3 \n" ); 2467 printk( "TLAN: Off. +0 +1 +2 +3\n" );
2468 for ( i = 0; i < 0x20; i+= 4 ) { 2468 for ( i = 0; i < 0x20; i+= 4 ) {
2469 printk( "TLAN: 0x%02x", i ); 2469 printk( "TLAN: 0x%02x", i );
2470 TLan_MiiReadReg( dev, phy, i, &data0 ); 2470 TLan_MiiReadReg( dev, phy, i, &data0 );
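tlan's multicast rework, like the 3c359, ibmtr and lanstreamer conversions that follow, replaces struct dev_mc_list/dmi_addr with struct netdev_hw_addr/addr but keeps the same shape: walk the device's multicast list and fold each 6-byte address into the adapter filter. For the token-ring drivers that fold ORs bytes 2..5 of every entry into a 4-byte functional address, modelled here standalone (struct hw_addr and the sample addresses are illustrative):

#include <stdio.h>

struct hw_addr { unsigned char addr[6]; };

int main(void)
{
        struct hw_addr mc[] = {
                { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
                { { 0x01, 0x00, 0x5e, 0x7f, 0xff, 0xfa } },
        };
        unsigned char func[4] = { 0, 0, 0, 0 };
        unsigned i;
        int b;

        /* token-ring functional address: OR bytes 2..5 of every entry */
        for (i = 0; i < sizeof(mc) / sizeof(mc[0]); i++)
                for (b = 0; b < 4; b++)
                        func[b] |= mc[i].addr[b + 2];

        printf("%02x:%02x:%02x:%02x\n", func[0], func[1], func[2], func[3]);
        return 0;
}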
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index 7d7f3eef1ab3..10800f16a231 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -77,7 +77,7 @@ static char version[] __devinitdata =
77 77
78#define FW_NAME "3com/3C359.bin" 78#define FW_NAME "3com/3C359.bin"
79MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ; 79MODULE_AUTHOR("Mike Phillips <mikep@linuxtr.net>") ;
80MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver \n") ; 80MODULE_DESCRIPTION("3Com 3C359 Velocity XL Token Ring Adapter Driver\n") ;
81MODULE_FIRMWARE(FW_NAME); 81MODULE_FIRMWARE(FW_NAME);
82 82
83/* Module parameters */ 83/* Module parameters */
@@ -163,19 +163,19 @@ static void print_tx_state(struct net_device *dev)
163 u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 163 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
164 int i ; 164 int i ;
165 165
166 printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d \n",xl_priv->tx_ring_head, 166 printk("tx_ring_head: %d, tx_ring_tail: %d, free_ent: %d\n",xl_priv->tx_ring_head,
167 xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ; 167 xl_priv->tx_ring_tail, xl_priv->free_ring_entries) ;
168 printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len \n"); 168 printk("Ring , Address , FSH , DnNextPtr, Buffer, Buffer_Len\n");
169 for (i = 0; i < 16; i++) { 169 for (i = 0; i < 16; i++) {
170 txd = &(xl_priv->xl_tx_ring[i]) ; 170 txd = &(xl_priv->xl_tx_ring[i]) ;
171 printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(txd), 171 printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(txd),
172 txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ; 172 txd->framestartheader, txd->dnnextptr, txd->buffer, txd->buffer_length ) ;
173 } 173 }
174 174
175 printk("DNLISTPTR = %04x \n", readl(xl_mmio + MMIO_DNLISTPTR) ); 175 printk("DNLISTPTR = %04x\n", readl(xl_mmio + MMIO_DNLISTPTR) );
176 176
177 printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) ); 177 printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL) );
178 printk("Queue status = %0x \n",netif_running(dev) ) ; 178 printk("Queue status = %0x\n",netif_running(dev) ) ;
179} 179}
180 180
181static void print_rx_state(struct net_device *dev) 181static void print_rx_state(struct net_device *dev)
@@ -186,19 +186,19 @@ static void print_rx_state(struct net_device *dev)
186 u8 __iomem *xl_mmio = xl_priv->xl_mmio ; 186 u8 __iomem *xl_mmio = xl_priv->xl_mmio ;
187 int i ; 187 int i ;
188 188
189 printk("rx_ring_tail: %d \n", xl_priv->rx_ring_tail) ; 189 printk("rx_ring_tail: %d\n", xl_priv->rx_ring_tail);
190 printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len \n"); 190 printk("Ring , Address , FrameState , UPNextPtr, FragAddr, Frag_Len\n");
191 for (i = 0; i < 16; i++) { 191 for (i = 0; i < 16; i++) {
192 /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */ 192 /* rxd = (struct xl_rx_desc *)xl_priv->rx_ring_dma_addr + (i * sizeof(struct xl_rx_desc)) ; */
193 rxd = &(xl_priv->xl_rx_ring[i]) ; 193 rxd = &(xl_priv->xl_rx_ring[i]) ;
194 printk("%d, %08lx, %08x, %08x, %08x, %08x \n", i, virt_to_bus(rxd), 194 printk("%d, %08lx, %08x, %08x, %08x, %08x\n", i, virt_to_bus(rxd),
195 rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ; 195 rxd->framestatus, rxd->upnextptr, rxd->upfragaddr, rxd->upfraglen ) ;
196 } 196 }
197 197
198 printk("UPLISTPTR = %04x \n", readl(xl_mmio + MMIO_UPLISTPTR) ); 198 printk("UPLISTPTR = %04x\n", readl(xl_mmio + MMIO_UPLISTPTR));
199 199
200 printk("DmaCtl = %04x \n", readl(xl_mmio + MMIO_DMA_CTRL) ); 200 printk("DmaCtl = %04x\n", readl(xl_mmio + MMIO_DMA_CTRL));
201 printk("Queue status = %0x \n",netif_running(dev) ) ; 201 printk("Queue status = %0x\n",netif_running(dev));
202} 202}
203#endif 203#endif
204 204
@@ -391,7 +391,7 @@ static int __devinit xl_init(struct net_device *dev)
391 struct xl_private *xl_priv = netdev_priv(dev); 391 struct xl_private *xl_priv = netdev_priv(dev);
392 int err; 392 int err;
393 393
394 printk(KERN_INFO "%s \n", version); 394 printk(KERN_INFO "%s\n", version);
395 printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n", 395 printk(KERN_INFO "%s: I/O at %hx, MMIO at %p, using irq %d\n",
396 xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq); 396 xl_priv->xl_card_name, (unsigned int)dev->base_addr ,xl_priv->xl_mmio, dev->irq);
397 397
@@ -463,7 +463,7 @@ static int xl_hw_reset(struct net_device *dev)
463 writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD); 463 writel( (IO_WORD_READ | PMBAR),xl_mmio + MMIO_MAC_ACCESS_CMD);
464 464
465#if XL_DEBUG 465#if XL_DEBUG
466 printk(KERN_INFO "Read from PMBAR = %04x \n", readw(xl_mmio + MMIO_MACDATA)) ; 466 printk(KERN_INFO "Read from PMBAR = %04x\n", readw(xl_mmio + MMIO_MACDATA));
467#endif 467#endif
468 468
469 if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) { 469 if ( readw( (xl_mmio + MMIO_MACDATA)) & PMB_CPHOLD ) {
@@ -591,9 +591,9 @@ static int xl_hw_reset(struct net_device *dev)
591#if XL_DEBUG 591#if XL_DEBUG
592 writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 592 writel(IO_WORD_READ | SWITCHSETTINGS, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
593 if ( readw(xl_mmio + MMIO_MACDATA) & 2) { 593 if ( readw(xl_mmio + MMIO_MACDATA) & 2) {
594 printk(KERN_INFO "Default ring speed 4 mbps \n") ; 594 printk(KERN_INFO "Default ring speed 4 mbps\n");
595 } else { 595 } else {
596 printk(KERN_INFO "Default ring speed 16 mbps \n") ; 596 printk(KERN_INFO "Default ring speed 16 mbps\n");
597 } 597 }
598 printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb); 598 printk(KERN_INFO "%s: xl_priv->srb = %04x\n",xl_priv->xl_card_name, xl_priv->srb);
599#endif 599#endif
@@ -651,7 +651,7 @@ static int xl_open(struct net_device *dev)
651 651
652 if (open_err != 0) { /* Something went wrong with the open command */ 652 if (open_err != 0) { /* Something went wrong with the open command */
653 if (open_err & 0x07) { /* Wrong speed, retry at different speed */ 653 if (open_err & 0x07) { /* Wrong speed, retry at different speed */
654 printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed \n", dev->name) ; 654 printk(KERN_WARNING "%s: Open Error, retrying at different ringspeed\n", dev->name);
655 switchsettings = switchsettings ^ 2 ; 655 switchsettings = switchsettings ^ 2 ;
656 xl_ee_write(dev,0x08,switchsettings) ; 656 xl_ee_write(dev,0x08,switchsettings) ;
657 xl_hw_reset(dev) ; 657 xl_hw_reset(dev) ;
@@ -703,7 +703,7 @@ static int xl_open(struct net_device *dev)
703 } 703 }
704 704
705 if (i==0) { 705 if (i==0) {
706 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled \n",dev->name) ; 706 printk(KERN_WARNING "%s: Not enough memory to allocate rx buffers. Adapter disabled\n",dev->name);
707 free_irq(dev->irq,dev) ; 707 free_irq(dev->irq,dev) ;
708 kfree(xl_priv->xl_tx_ring); 708 kfree(xl_priv->xl_tx_ring);
709 kfree(xl_priv->xl_rx_ring); 709 kfree(xl_priv->xl_rx_ring);
@@ -853,7 +853,7 @@ static int xl_open_hw(struct net_device *dev)
853 853
854 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 854 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 12, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
855 xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ; 855 xl_priv->arb = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
856 printk(", ARB: %04x \n",xl_priv->arb ) ; 856 printk(", ARB: %04x\n",xl_priv->arb );
857 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 857 writel( (MEM_WORD_READ | 0xD0000 | xl_priv->srb) + 14, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
858 vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ; 858 vsoff = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
859 859
@@ -867,7 +867,7 @@ static int xl_open_hw(struct net_device *dev)
867 ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ; 867 ver_str[i] = readb(xl_mmio + MMIO_MACDATA) ;
868 } 868 }
869 ver_str[i] = '\0' ; 869 ver_str[i] = '\0' ;
870 printk(KERN_INFO "%s: Microcode version String: %s \n",dev->name,ver_str); 870 printk(KERN_INFO "%s: Microcode version String: %s\n",dev->name,ver_str);
871 } 871 }
872 872
873 /* 873 /*
@@ -991,7 +991,7 @@ static void xl_rx(struct net_device *dev)
991 skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ; 991 skb = dev_alloc_skb(xl_priv->pkt_buf_sz) ;
992 992
993 if (skb==NULL) { /* Still need to fix the rx ring */ 993 if (skb==NULL) { /* Still need to fix the rx ring */
994 printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer \n",dev->name) ; 994 printk(KERN_WARNING "%s: dev_alloc_skb failed in rx, single buffer\n",dev->name);
995 adv_rx_ring(dev) ; 995 adv_rx_ring(dev) ;
996 dev->stats.rx_dropped++ ; 996 dev->stats.rx_dropped++ ;
997 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ; 997 writel(ACK_INTERRUPT | UPCOMPACK | LATCH_ACK , xl_mmio + MMIO_COMMAND) ;
@@ -1092,7 +1092,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
1092 */ 1092 */
1093 if (intstatus == 0x0001) { 1093 if (intstatus == 0x0001) {
1094 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 1094 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1095 printk(KERN_INFO "%s: 00001 int received \n",dev->name) ; 1095 printk(KERN_INFO "%s: 00001 int received\n",dev->name);
1096 } else { 1096 } else {
1097 if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) { 1097 if (intstatus & (HOSTERRINT | SRBRINT | ARBCINT | UPCOMPINT | DNCOMPINT | HARDERRINT | (1<<8) | TXUNDERRUN | ASBFINT)) {
1098 1098
@@ -1103,9 +1103,9 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
1103 */ 1103 */
1104 1104
1105 if (intstatus & HOSTERRINT) { 1105 if (intstatus & HOSTERRINT) {
1106 printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x \n",dev->name,intstatus) ; 1106 printk(KERN_WARNING "%s: Host Error, performing global reset, intstatus = %04x\n",dev->name,intstatus);
1107 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ; 1107 writew( GLOBAL_RESET, xl_mmio + MMIO_COMMAND ) ;
1108 printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name); 1108 printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
1109 netif_stop_queue(dev) ; 1109 netif_stop_queue(dev) ;
1110 xl_freemem(dev) ; 1110 xl_freemem(dev) ;
1111 free_irq(dev->irq,dev); 1111 free_irq(dev->irq,dev);
@@ -1128,7 +1128,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
1128 Must put a timeout check here ! */ 1128 Must put a timeout check here ! */
1129 /* Empty Loop */ 1129 /* Empty Loop */
1130 } 1130 }
1131 printk(KERN_WARNING "%s: TX Underrun received \n",dev->name) ; 1131 printk(KERN_WARNING "%s: TX Underrun received\n",dev->name);
1132 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 1132 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1133 } /* TxUnderRun */ 1133 } /* TxUnderRun */
1134 1134
@@ -1157,13 +1157,13 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
1157 macstatus = readw(xl_mmio + MMIO_MACDATA) ; 1157 macstatus = readw(xl_mmio + MMIO_MACDATA) ;
1158 printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name); 1158 printk(KERN_WARNING "%s: MacStatusError, details: ", dev->name);
1159 if (macstatus & (1<<14)) 1159 if (macstatus & (1<<14))
1160 printk(KERN_WARNING "tchk error: Unrecoverable error \n") ; 1160 printk(KERN_WARNING "tchk error: Unrecoverable error\n");
1161 if (macstatus & (1<<3)) 1161 if (macstatus & (1<<3))
1162 printk(KERN_WARNING "eint error: Internal watchdog timer expired \n") ; 1162 printk(KERN_WARNING "eint error: Internal watchdog timer expired\n");
1163 if (macstatus & (1<<2)) 1163 if (macstatus & (1<<2))
1164 printk(KERN_WARNING "aint error: Host tried to perform invalid operation \n") ; 1164 printk(KERN_WARNING "aint error: Host tried to perform invalid operation\n");
1165 printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ; 1165 printk(KERN_WARNING "Instatus = %02x, macstatus = %02x\n",intstatus,macstatus) ;
1166 printk(KERN_WARNING "%s: Resetting hardware: \n", dev->name); 1166 printk(KERN_WARNING "%s: Resetting hardware:\n", dev->name);
1167 netif_stop_queue(dev) ; 1167 netif_stop_queue(dev) ;
1168 xl_freemem(dev) ; 1168 xl_freemem(dev) ;
1169 free_irq(dev->irq,dev); 1169 free_irq(dev->irq,dev);
@@ -1175,7 +1175,7 @@ static irqreturn_t xl_interrupt(int irq, void *dev_id)
1175 return IRQ_HANDLED; 1175 return IRQ_HANDLED;
1176 } 1176 }
1177 } else { 1177 } else {
1178 printk(KERN_WARNING "%s: Received Unknown interrupt : %04x \n", dev->name, intstatus) ; 1178 printk(KERN_WARNING "%s: Received Unknown interrupt : %04x\n", dev->name, intstatus);
1179 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 1179 writel(ACK_INTERRUPT | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1180 } 1180 }
1181 } 1181 }
@@ -1350,11 +1350,11 @@ static int xl_close(struct net_device *dev)
1350 1350
1351 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD); 1351 writel(MEM_BYTE_READ | 0xd0000 | xl_priv->srb, xl_mmio + MMIO_MAC_ACCESS_CMD);
1352 if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) { 1352 if (readb(xl_mmio + MMIO_MACDATA) != CLOSE_NIC) {
1353 printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response \n",dev->name) ; 1353 printk(KERN_INFO "%s: CLOSE_NIC did not get a CLOSE_NIC response\n",dev->name);
1354 } else { 1354 } else {
1355 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ; 1355 writel((MEM_BYTE_READ | 0xd0000 | xl_priv->srb) +2, xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1356 if (readb(xl_mmio + MMIO_MACDATA)==0) { 1356 if (readb(xl_mmio + MMIO_MACDATA)==0) {
1357 printk(KERN_INFO "%s: Adapter has been closed \n",dev->name) ; 1357 printk(KERN_INFO "%s: Adapter has been closed\n",dev->name);
1358 writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ; 1358 writew(ACK_INTERRUPT | SRBRACK | LATCH_ACK, xl_mmio + MMIO_COMMAND) ;
1359 1359
1360 xl_freemem(dev) ; 1360 xl_freemem(dev) ;
@@ -1391,7 +1391,7 @@ static int xl_close(struct net_device *dev)
1391static void xl_set_rx_mode(struct net_device *dev) 1391static void xl_set_rx_mode(struct net_device *dev)
1392{ 1392{
1393 struct xl_private *xl_priv = netdev_priv(dev); 1393 struct xl_private *xl_priv = netdev_priv(dev);
1394 struct dev_mc_list *dmi; 1394 struct netdev_hw_addr *ha;
1395 unsigned char dev_mc_address[4] ; 1395 unsigned char dev_mc_address[4] ;
1396 u16 options ; 1396 u16 options ;
1397 1397
@@ -1408,11 +1408,11 @@ static void xl_set_rx_mode(struct net_device *dev)
1408 1408
1409 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 1409 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1410 1410
1411 netdev_for_each_mc_addr(dmi, dev) { 1411 netdev_for_each_mc_addr(ha, dev) {
1412 dev_mc_address[0] |= dmi->dmi_addr[2] ; 1412 dev_mc_address[0] |= ha->addr[2];
1413 dev_mc_address[1] |= dmi->dmi_addr[3] ; 1413 dev_mc_address[1] |= ha->addr[3];
1414 dev_mc_address[2] |= dmi->dmi_addr[4] ; 1414 dev_mc_address[2] |= ha->addr[4];
1415 dev_mc_address[3] |= dmi->dmi_addr[5] ; 1415 dev_mc_address[3] |= ha->addr[5];
1416 } 1416 }
1417 1417
1418 if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */ 1418 if (memcmp(xl_priv->xl_functional_addr,dev_mc_address,4) != 0) { /* Options have changed, run the command */
@@ -1447,11 +1447,11 @@ static void xl_srb_bh(struct net_device *dev)
1447 printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ; 1447 printk(KERN_INFO "%s: Command: %d - Invalid Command code\n",dev->name,srb_cmd) ;
1448 break ; 1448 break ;
1449 case 4: 1449 case 4:
1450 printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command \n",dev->name,srb_cmd) ; 1450 printk(KERN_INFO "%s: Command: %d - Adapter is closed, must be open for this command\n",dev->name,srb_cmd);
1451 break ; 1451 break ;
1452 1452
1453 case 6: 1453 case 6:
1454 printk(KERN_INFO "%s: Command: %d - Options Invalid for command \n",dev->name,srb_cmd) ; 1454 printk(KERN_INFO "%s: Command: %d - Options Invalid for command\n",dev->name,srb_cmd);
1455 break ; 1455 break ;
1456 1456
1457 case 0: /* Successful command execution */ 1457 case 0: /* Successful command execution */
@@ -1472,11 +1472,11 @@ static void xl_srb_bh(struct net_device *dev)
1472 break ; 1472 break ;
1473 case SET_FUNC_ADDRESS: 1473 case SET_FUNC_ADDRESS:
1474 if(xl_priv->xl_message_level) 1474 if(xl_priv->xl_message_level)
1475 printk(KERN_INFO "%s: Functional Address Set \n",dev->name) ; 1475 printk(KERN_INFO "%s: Functional Address Set\n",dev->name);
1476 break ; 1476 break ;
1477 case CLOSE_NIC: 1477 case CLOSE_NIC:
1478 if(xl_priv->xl_message_level) 1478 if(xl_priv->xl_message_level)
1479 printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler \n",dev->name) ; 1479 printk(KERN_INFO "%s: Received CLOSE_NIC interrupt in interrupt handler\n",dev->name);
1480 break ; 1480 break ;
1481 case SET_MULTICAST_MODE: 1481 case SET_MULTICAST_MODE:
1482 if(xl_priv->xl_message_level) 1482 if(xl_priv->xl_message_level)
@@ -1485,9 +1485,9 @@ static void xl_srb_bh(struct net_device *dev)
1485 case SET_RECEIVE_MODE: 1485 case SET_RECEIVE_MODE:
1486 if(xl_priv->xl_message_level) { 1486 if(xl_priv->xl_message_level) {
1487 if (xl_priv->xl_copy_all_options == 0x0004) 1487 if (xl_priv->xl_copy_all_options == 0x0004)
1488 printk(KERN_INFO "%s: Entering promiscuous mode \n", dev->name) ; 1488 printk(KERN_INFO "%s: Entering promiscuous mode\n", dev->name);
1489 else 1489 else
1490 printk(KERN_INFO "%s: Entering normal receive mode \n",dev->name) ; 1490 printk(KERN_INFO "%s: Entering normal receive mode\n",dev->name);
1491 } 1491 }
1492 break ; 1492 break ;
1493 1493
@@ -1557,20 +1557,20 @@ static void xl_arb_cmd(struct net_device *dev)
1557 xl_freemem(dev) ; 1557 xl_freemem(dev) ;
1558 free_irq(dev->irq,dev); 1558 free_irq(dev->irq,dev);
1559 1559
1560 printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ; 1560 printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
1561 } /* If serious error */ 1561 } /* If serious error */
1562 1562
1563 if (xl_priv->xl_message_level) { 1563 if (xl_priv->xl_message_level) {
1564 if (lan_status_diff & LSC_SIG_LOSS) 1564 if (lan_status_diff & LSC_SIG_LOSS)
1565 printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ; 1565 printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
1566 if (lan_status_diff & LSC_HARD_ERR) 1566 if (lan_status_diff & LSC_HARD_ERR)
1567 printk(KERN_INFO "%s: Beaconing \n",dev->name); 1567 printk(KERN_INFO "%s: Beaconing\n",dev->name);
1568 if (lan_status_diff & LSC_SOFT_ERR) 1568 if (lan_status_diff & LSC_SOFT_ERR)
1569 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name); 1569 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
1570 if (lan_status_diff & LSC_TRAN_BCN) 1570 if (lan_status_diff & LSC_TRAN_BCN)
1571 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name); 1571 printk(KERN_INFO "%s: We are tranmitting the beacon, aaah\n",dev->name);
1572 if (lan_status_diff & LSC_SS) 1572 if (lan_status_diff & LSC_SS)
1573 printk(KERN_INFO "%s: Single Station on the ring \n", dev->name); 1573 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1574 if (lan_status_diff & LSC_RING_REC) 1574 if (lan_status_diff & LSC_RING_REC)
1575 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name); 1575 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1576 if (lan_status_diff & LSC_FDX_MODE) 1576 if (lan_status_diff & LSC_FDX_MODE)
@@ -1579,7 +1579,7 @@ static void xl_arb_cmd(struct net_device *dev)
1579 1579
1580 if (lan_status_diff & LSC_CO) { 1580 if (lan_status_diff & LSC_CO) {
1581 if (xl_priv->xl_message_level) 1581 if (xl_priv->xl_message_level)
1582 printk(KERN_INFO "%s: Counter Overflow \n", dev->name); 1582 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
1583 /* Issue READ.LOG command */ 1583 /* Issue READ.LOG command */
1584 xl_srb_cmd(dev, READ_LOG) ; 1584 xl_srb_cmd(dev, READ_LOG) ;
1585 } 1585 }
@@ -1595,7 +1595,7 @@ static void xl_arb_cmd(struct net_device *dev)
1595 } /* Lan.change.status */ 1595 } /* Lan.change.status */
1596 else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */ 1596 else if ( arb_cmd == RECEIVE_DATA) { /* Received.Data */
1597#if XL_DEBUG 1597#if XL_DEBUG
1598 printk(KERN_INFO "Received.Data \n") ; 1598 printk(KERN_INFO "Received.Data\n");
1599#endif 1599#endif
1600 writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ; 1600 writel( ((MEM_WORD_READ | 0xD0000 | xl_priv->arb) + 6), xl_mmio + MMIO_MAC_ACCESS_CMD) ;
1601 xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ; 1601 xl_priv->mac_buffer = swab16(readw(xl_mmio + MMIO_MACDATA)) ;
@@ -1630,7 +1630,7 @@ static void xl_arb_cmd(struct net_device *dev)
1630 xl_asb_cmd(dev) ; 1630 xl_asb_cmd(dev) ;
1631 1631
1632 } else { 1632 } else {
1633 printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x \n",dev->name,arb_cmd) ; 1633 printk(KERN_WARNING "%s: Received unknown arb (xl_priv) command: %02x\n",dev->name,arb_cmd);
1634 } 1634 }
1635 1635
1636 /* Acknowledge the arb interrupt */ 1636 /* Acknowledge the arb interrupt */
@@ -1687,13 +1687,13 @@ static void xl_asb_bh(struct net_device *dev)
1687 ret_code = readb(xl_mmio + MMIO_MACDATA) ; 1687 ret_code = readb(xl_mmio + MMIO_MACDATA) ;
1688 switch (ret_code) { 1688 switch (ret_code) {
1689 case 0x01: 1689 case 0x01:
1690 printk(KERN_INFO "%s: ASB Command, unrecognized command code \n",dev->name) ; 1690 printk(KERN_INFO "%s: ASB Command, unrecognized command code\n",dev->name);
1691 break ; 1691 break ;
1692 case 0x26: 1692 case 0x26:
1693 printk(KERN_INFO "%s: ASB Command, unexpected receive buffer \n", dev->name) ; 1693 printk(KERN_INFO "%s: ASB Command, unexpected receive buffer\n", dev->name);
1694 break ; 1694 break ;
1695 case 0x40: 1695 case 0x40:
1696 printk(KERN_INFO "%s: ASB Command, Invalid Station ID \n", dev->name) ; 1696 printk(KERN_INFO "%s: ASB Command, Invalid Station ID\n", dev->name);
1697 break ; 1697 break ;
1698 } 1698 }
1699 xl_priv->asb_queued = 0 ; 1699 xl_priv->asb_queued = 0 ;
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 1a0967246e2f..eebdaae24328 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -986,7 +986,7 @@ static void open_sap(unsigned char type, struct net_device *dev)
986static void tok_set_multicast_list(struct net_device *dev) 986static void tok_set_multicast_list(struct net_device *dev)
987{ 987{
988 struct tok_info *ti = netdev_priv(dev); 988 struct tok_info *ti = netdev_priv(dev);
989 struct dev_mc_list *mclist; 989 struct netdev_hw_addr *ha;
990 unsigned char address[4]; 990 unsigned char address[4];
991 991
992 int i; 992 int i;
@@ -995,11 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev)
995 /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/ 995 /*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
996 if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return; 996 if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
997 address[0] = address[1] = address[2] = address[3] = 0; 997 address[0] = address[1] = address[2] = address[3] = 0;
998 netdev_for_each_mc_addr(mclist, dev) { 998 netdev_for_each_mc_addr(ha, dev) {
999 address[0] |= mclist->dmi_addr[2]; 999 address[0] |= ha->addr[2];
1000 address[1] |= mclist->dmi_addr[3]; 1000 address[1] |= ha->addr[3];
1001 address[2] |= mclist->dmi_addr[4]; 1001 address[2] |= ha->addr[4];
1002 address[3] |= mclist->dmi_addr[5]; 1002 address[3] |= ha->addr[5];
1003 } 1003 }
1004 SET_PAGE(ti->srb_page); 1004 SET_PAGE(ti->srb_page);
1005 for (i = 0; i < sizeof(struct srb_set_funct_addr); i++) 1005 for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index 7a5fbf5a9d71..5bd140704533 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -358,7 +358,7 @@ static int __devinit streamer_init_one(struct pci_dev *pdev,
358 pcr |= PCI_COMMAND_SERR; 358 pcr |= PCI_COMMAND_SERR;
359 pci_write_config_word (pdev, PCI_COMMAND, pcr); 359 pci_write_config_word (pdev, PCI_COMMAND, pcr);
360 360
361 printk("%s \n", version); 361 printk("%s\n", version);
362 printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name, 362 printk("%s: %s. I/O at %hx, MMIO at %p, using irq %d\n",dev->name,
363 streamer_priv->streamer_card_name, 363 streamer_priv->streamer_card_name,
364 (unsigned int) dev->base_addr, 364 (unsigned int) dev->base_addr,
@@ -651,7 +651,7 @@ static int streamer_open(struct net_device *dev)
651#if STREAMER_DEBUG 651#if STREAMER_DEBUG
652 writew(readw(streamer_mmio + LAPWWO), 652 writew(readw(streamer_mmio + LAPWWO),
653 streamer_mmio + LAPA); 653 streamer_mmio + LAPA);
654 printk("srb open request: \n"); 654 printk("srb open request:\n");
655 for (i = 0; i < 16; i++) { 655 for (i = 0; i < 16; i++) {
656 printk("%x:", ntohs(readw(streamer_mmio + LAPDINC))); 656 printk("%x:", ntohs(readw(streamer_mmio + LAPDINC)));
657 } 657 }
@@ -701,7 +701,7 @@ static int streamer_open(struct net_device *dev)
701 if (srb_word != 0) { 701 if (srb_word != 0) {
702 if (srb_word == 0x07) { 702 if (srb_word == 0x07) {
703 if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */ 703 if (!streamer_priv->streamer_ring_speed && open_finished) { /* Autosense , first time around */
704 printk(KERN_WARNING "%s: Retrying at different ring speed \n", 704 printk(KERN_WARNING "%s: Retrying at different ring speed\n",
705 dev->name); 705 dev->name);
706 open_finished = 0; 706 open_finished = 0;
707 } else { 707 } else {
@@ -717,7 +717,7 @@ static int streamer_open(struct net_device *dev)
717 ((error_code & 0x0f) == 0x0d)) 717 ((error_code & 0x0f) == 0x0d))
718 { 718 {
719 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name); 719 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n", dev->name);
720 printk(KERN_WARNING "%s: Please try again with a specified ring speed \n", dev->name); 720 printk(KERN_WARNING "%s: Please try again with a specified ring speed\n", dev->name);
721 free_irq(dev->irq, dev); 721 free_irq(dev->irq, dev);
722 return -EIO; 722 return -EIO;
723 } 723 }
@@ -923,7 +923,7 @@ static void streamer_rx(struct net_device *dev)
923 923
924 if (rx_desc->status & 0x7E830000) { /* errors */ 924 if (rx_desc->status & 0x7E830000) { /* errors */
925 if (streamer_priv->streamer_message_level) { 925 if (streamer_priv->streamer_message_level) {
926 printk(KERN_WARNING "%s: Rx Error %x \n", 926 printk(KERN_WARNING "%s: Rx Error %x\n",
927 dev->name, rx_desc->status); 927 dev->name, rx_desc->status);
928 } 928 }
929 } else { /* received without errors */ 929 } else { /* received without errors */
@@ -936,7 +936,7 @@ static void streamer_rx(struct net_device *dev)
936 936
937 if (skb == NULL) 937 if (skb == NULL)
938 { 938 {
939 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n", dev->name); 939 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n", dev->name);
940 dev->stats.rx_dropped++; 940 dev->stats.rx_dropped++;
941 } else { /* we allocated an skb OK */ 941 } else { /* we allocated an skb OK */
942 if (buffer_cnt == 1) { 942 if (buffer_cnt == 1) {
@@ -1267,7 +1267,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
1267 netdev_priv(dev); 1267 netdev_priv(dev);
1268 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio; 1268 __u8 __iomem *streamer_mmio = streamer_priv->streamer_mmio;
1269 __u8 options = 0; 1269 __u8 options = 0;
1270 struct dev_mc_list *dmi; 1270 struct netdev_hw_addr *ha;
1271 unsigned char dev_mc_address[5]; 1271 unsigned char dev_mc_address[5];
1272 1272
1273 writel(streamer_priv->srb, streamer_mmio + LAPA); 1273 writel(streamer_priv->srb, streamer_mmio + LAPA);
@@ -1303,11 +1303,11 @@ static void streamer_set_rx_mode(struct net_device *dev)
1303 writel(streamer_priv->srb,streamer_mmio+LAPA); 1303 writel(streamer_priv->srb,streamer_mmio+LAPA);
1304 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 1304 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1305 1305
1306 netdev_for_each_mc_addr(dmi, dev) { 1306 netdev_for_each_mc_addr(ha, dev) {
1307 dev_mc_address[0] |= dmi->dmi_addr[2] ; 1307 dev_mc_address[0] |= ha->addr[2];
1308 dev_mc_address[1] |= dmi->dmi_addr[3] ; 1308 dev_mc_address[1] |= ha->addr[3];
1309 dev_mc_address[2] |= dmi->dmi_addr[4] ; 1309 dev_mc_address[2] |= ha->addr[4];
1310 dev_mc_address[3] |= dmi->dmi_addr[5] ; 1310 dev_mc_address[3] |= ha->addr[5];
1311 } 1311 }
1312 1312
1313 writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC); 1313 writew(htons(SRB_SET_FUNC_ADDRESS << 8),streamer_mmio+LAPDINC);
@@ -1364,7 +1364,7 @@ static void streamer_srb_bh(struct net_device *dev)
1364 case 0x00: 1364 case 0x00:
1365 break; 1365 break;
1366 case 0x01: 1366 case 0x01:
1367 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name); 1367 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1368 break; 1368 break;
1369 case 0x04: 1369 case 0x04:
1370 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); 1370 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1392,13 +1392,13 @@ static void streamer_srb_bh(struct net_device *dev)
1392 case 0x00: 1392 case 0x00:
1393 break; 1393 break;
1394 case 0x01: 1394 case 0x01:
1395 printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); 1395 printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
1396 break; 1396 break;
1397 case 0x04: 1397 case 0x04:
1398 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); 1398 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
1399 break; 1399 break;
1400 case 0x39: /* Must deal with this if individual multicast addresses used */ 1400 case 0x39: /* Must deal with this if individual multicast addresses used */
1401 printk(KERN_INFO "%s: Group address not found \n", dev->name); 1401 printk(KERN_INFO "%s: Group address not found\n", dev->name);
1402 break; 1402 break;
1403 default: 1403 default:
1404 break; 1404 break;
@@ -1414,10 +1414,10 @@ static void streamer_srb_bh(struct net_device *dev)
1414 switch (srb_word) { 1414 switch (srb_word) {
1415 case 0x00: 1415 case 0x00:
1416 if (streamer_priv->streamer_message_level) 1416 if (streamer_priv->streamer_message_level)
1417 printk(KERN_INFO "%s: Functional Address Mask Set \n", dev->name); 1417 printk(KERN_INFO "%s: Functional Address Mask Set\n", dev->name);
1418 break; 1418 break;
1419 case 0x01: 1419 case 0x01:
1420 printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); 1420 printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
1421 break; 1421 break;
1422 case 0x04: 1422 case 0x04:
1423 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); 1423 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1448,7 +1448,7 @@ static void streamer_srb_bh(struct net_device *dev)
1448 } 1448 }
1449 break; 1449 break;
1450 case 0x01: 1450 case 0x01:
1451 printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); 1451 printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
1452 break; 1452 break;
1453 case 0x04: 1453 case 0x04:
1454 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); 1454 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1467,7 +1467,7 @@ static void streamer_srb_bh(struct net_device *dev)
1467 printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name); 1467 printk(KERN_INFO "%s: Read Source Routing Counters issued\n", dev->name);
1468 break; 1468 break;
1469 case 0x01: 1469 case 0x01:
1470 printk(KERN_WARNING "%s: Unrecognized srb command \n", dev->name); 1470 printk(KERN_WARNING "%s: Unrecognized srb command\n", dev->name);
1471 break; 1471 break;
1472 case 0x04: 1472 case 0x04:
1473 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name); 1473 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n", dev->name);
@@ -1556,7 +1556,7 @@ static void streamer_arb_cmd(struct net_device *dev)
1556 (streamer_mmio + LAPDINC))); 1556 (streamer_mmio + LAPDINC)));
1557 } 1557 }
1558 1558
1559 printk("next %04x, fs %02x, len %04x \n", next, 1559 printk("next %04x, fs %02x, len %04x\n", next,
1560 status, len); 1560 status, len);
1561 } 1561 }
1562#endif 1562#endif
@@ -1593,7 +1593,7 @@ static void streamer_arb_cmd(struct net_device *dev)
1593 1593
1594 mac_frame->protocol = tr_type_trans(mac_frame, dev); 1594 mac_frame->protocol = tr_type_trans(mac_frame, dev);
1595#if STREAMER_NETWORK_MONITOR 1595#if STREAMER_NETWORK_MONITOR
1596 printk(KERN_WARNING "%s: Received MAC Frame, details: \n", 1596 printk(KERN_WARNING "%s: Received MAC Frame, details:\n",
1597 dev->name); 1597 dev->name);
1598 mac_hdr = tr_hdr(mac_frame); 1598 mac_hdr = tr_hdr(mac_frame);
1599 printk(KERN_WARNING 1599 printk(KERN_WARNING
@@ -1669,15 +1669,15 @@ drop_frame:
1669 /* If serious error */ 1669 /* If serious error */
1670 if (streamer_priv->streamer_message_level) { 1670 if (streamer_priv->streamer_message_level) {
1671 if (lan_status_diff & LSC_SIG_LOSS) 1671 if (lan_status_diff & LSC_SIG_LOSS)
1672 printk(KERN_WARNING "%s: No receive signal detected \n", dev->name); 1672 printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
1673 if (lan_status_diff & LSC_HARD_ERR) 1673 if (lan_status_diff & LSC_HARD_ERR)
1674 printk(KERN_INFO "%s: Beaconing \n", dev->name); 1674 printk(KERN_INFO "%s: Beaconing\n", dev->name);
1675 if (lan_status_diff & LSC_SOFT_ERR) 1675 if (lan_status_diff & LSC_SOFT_ERR)
1676 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n", dev->name); 1676 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n", dev->name);
1677 if (lan_status_diff & LSC_TRAN_BCN) 1677 if (lan_status_diff & LSC_TRAN_BCN)
 1678 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name); 1678 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n", dev->name);
1679 if (lan_status_diff & LSC_SS) 1679 if (lan_status_diff & LSC_SS)
1680 printk(KERN_INFO "%s: Single Station on the ring \n", dev->name); 1680 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1681 if (lan_status_diff & LSC_RING_REC) 1681 if (lan_status_diff & LSC_RING_REC)
1682 printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name); 1682 printk(KERN_INFO "%s: Ring recovery ongoing\n", dev->name);
1683 if (lan_status_diff & LSC_FDX_MODE) 1683 if (lan_status_diff & LSC_FDX_MODE)
@@ -1686,7 +1686,7 @@ drop_frame:
1686 1686
1687 if (lan_status_diff & LSC_CO) { 1687 if (lan_status_diff & LSC_CO) {
1688 if (streamer_priv->streamer_message_level) 1688 if (streamer_priv->streamer_message_level)
1689 printk(KERN_INFO "%s: Counter Overflow \n", dev->name); 1689 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
1690 1690
1691 /* Issue READ.LOG command */ 1691 /* Issue READ.LOG command */
1692 1692
@@ -1716,7 +1716,7 @@ drop_frame:
1716 streamer_priv->streamer_lan_status = lan_status; 1716 streamer_priv->streamer_lan_status = lan_status;
1717 } /* Lan.change.status */ 1717 } /* Lan.change.status */
1718 else 1718 else
1719 printk(KERN_WARNING "%s: Unknown arb command \n", dev->name); 1719 printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
1720} 1720}
1721 1721
1722static void streamer_asb_bh(struct net_device *dev) 1722static void streamer_asb_bh(struct net_device *dev)
@@ -1747,10 +1747,10 @@ static void streamer_asb_bh(struct net_device *dev)
1747 rc=ntohs(readw(streamer_mmio+LAPD)) >> 8; 1747 rc=ntohs(readw(streamer_mmio+LAPD)) >> 8;
1748 switch (rc) { 1748 switch (rc) {
1749 case 0x01: 1749 case 0x01:
1750 printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name); 1750 printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
1751 break; 1751 break;
1752 case 0x26: 1752 case 0x26:
1753 printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name); 1753 printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
1754 break; 1754 break;
1755 case 0xFF: 1755 case 0xFF:
1756 /* Valid response, everything should be ok again */ 1756 /* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index 3a25e0434ae2..3d2fbe60b46e 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -302,7 +302,7 @@ static int olympic_init(struct net_device *dev)
302 olympic_priv=netdev_priv(dev); 302 olympic_priv=netdev_priv(dev);
303 olympic_mmio=olympic_priv->olympic_mmio; 303 olympic_mmio=olympic_priv->olympic_mmio;
304 304
305 printk("%s \n", version); 305 printk("%s\n", version);
306 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq); 306 printk("%s. I/O at %hx, MMIO at %p, LAP at %p, using irq %d\n", olympic_priv->olympic_card_name, (unsigned int) dev->base_addr,olympic_priv->olympic_mmio, olympic_priv->olympic_lap, dev->irq);
307 307
308 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL); 308 writel(readl(olympic_mmio+BCTL) | BCTL_SOFTRESET,olympic_mmio+BCTL);
@@ -468,7 +468,7 @@ static int olympic_open(struct net_device *dev)
468#if OLYMPIC_DEBUG 468#if OLYMPIC_DEBUG
469 printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA)); 469 printk("LAPWWO: %x, LAPA: %x\n",readw(olympic_mmio+LAPWWO), readl(olympic_mmio+LAPA));
470 printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK)); 470 printk("SISR Mask = %04x\n", readl(olympic_mmio+SISR_MASK));
471 printk("Before the open command \n"); 471 printk("Before the open command\n");
472#endif 472#endif
473 do { 473 do {
474 memset_io(init_srb,0,SRB_COMMAND_SIZE); 474 memset_io(init_srb,0,SRB_COMMAND_SIZE);
@@ -520,7 +520,7 @@ static int olympic_open(struct net_device *dev)
520 break; 520 break;
521 } 521 }
522 if (time_after(jiffies, t + 10*HZ)) { 522 if (time_after(jiffies, t + 10*HZ)) {
523 printk(KERN_WARNING "%s: SRB timed out. \n",dev->name) ; 523 printk(KERN_WARNING "%s: SRB timed out.\n",dev->name);
524 olympic_priv->srb_queued=0; 524 olympic_priv->srb_queued=0;
525 break ; 525 break ;
526 } 526 }
@@ -549,7 +549,7 @@ static int olympic_open(struct net_device *dev)
549 break; 549 break;
550 case 0x07: 550 case 0x07:
551 if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */ 551 if (!olympic_priv->olympic_ring_speed && open_finished) { /* Autosense , first time around */
552 printk(KERN_WARNING "%s: Retrying at different ring speed \n", dev->name); 552 printk(KERN_WARNING "%s: Retrying at different ring speed\n", dev->name);
553 open_finished = 0 ; 553 open_finished = 0 ;
554 continue; 554 continue;
555 } 555 }
@@ -558,7 +558,7 @@ static int olympic_open(struct net_device *dev)
558 558
559 if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) { 559 if (!olympic_priv->olympic_ring_speed && ((err & 0x0f) == 0x0d)) {
560 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name); 560 printk(KERN_WARNING "%s: Tried to autosense ring speed with no monitors present\n",dev->name);
561 printk(KERN_WARNING "%s: Please try again with a specified ring speed \n",dev->name); 561 printk(KERN_WARNING "%s: Please try again with a specified ring speed\n",dev->name);
562 } else { 562 } else {
563 printk(KERN_WARNING "%s: %s - %s\n", dev->name, 563 printk(KERN_WARNING "%s: %s - %s\n", dev->name,
564 open_maj_error[(err & 0xf0) >> 4], 564 open_maj_error[(err & 0xf0) >> 4],
@@ -759,7 +759,7 @@ static void olympic_rx(struct net_device *dev)
759 olympic_priv->rx_status_last_received++ ; 759 olympic_priv->rx_status_last_received++ ;
760 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1); 760 olympic_priv->rx_status_last_received &= (OLYMPIC_RX_RING_SIZE -1);
761#if OLYMPIC_DEBUG 761#if OLYMPIC_DEBUG
762 printk("rx status: %x rx len: %x \n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen)); 762 printk("rx status: %x rx len: %x\n", le32_to_cpu(rx_status->status_buffercnt), le32_to_cpu(rx_status->fragmentcnt_framelen));
763#endif 763#endif
764 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff; 764 length = le32_to_cpu(rx_status->fragmentcnt_framelen) & 0xffff;
765 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff; 765 buffer_cnt = le32_to_cpu(rx_status->status_buffercnt) & 0xffff;
@@ -774,15 +774,15 @@ static void olympic_rx(struct net_device *dev)
774 if (l_status_buffercnt & 0x3B000000) { 774 if (l_status_buffercnt & 0x3B000000) {
775 if (olympic_priv->olympic_message_level) { 775 if (olympic_priv->olympic_message_level) {
776 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */ 776 if (l_status_buffercnt & (1<<29)) /* Rx Frame Truncated */
777 printk(KERN_WARNING "%s: Rx Frame Truncated \n",dev->name); 777 printk(KERN_WARNING "%s: Rx Frame Truncated\n",dev->name);
778 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */ 778 if (l_status_buffercnt & (1<<28)) /*Rx receive overrun */
779 printk(KERN_WARNING "%s: Rx Frame Receive overrun \n",dev->name); 779 printk(KERN_WARNING "%s: Rx Frame Receive overrun\n",dev->name);
780 if (l_status_buffercnt & (1<<27)) /* No receive buffers */ 780 if (l_status_buffercnt & (1<<27)) /* No receive buffers */
781 printk(KERN_WARNING "%s: No receive buffers \n",dev->name); 781 printk(KERN_WARNING "%s: No receive buffers\n",dev->name);
782 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */ 782 if (l_status_buffercnt & (1<<25)) /* Receive frame error detect */
783 printk(KERN_WARNING "%s: Receive frame error detect \n",dev->name); 783 printk(KERN_WARNING "%s: Receive frame error detect\n",dev->name);
784 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */ 784 if (l_status_buffercnt & (1<<24)) /* Received Error Detect */
785 printk(KERN_WARNING "%s: Received Error Detect \n",dev->name); 785 printk(KERN_WARNING "%s: Received Error Detect\n",dev->name);
786 } 786 }
787 olympic_priv->rx_ring_last_received += i ; 787 olympic_priv->rx_ring_last_received += i ;
788 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ; 788 olympic_priv->rx_ring_last_received &= (OLYMPIC_RX_RING_SIZE -1) ;
@@ -796,7 +796,7 @@ static void olympic_rx(struct net_device *dev)
796 } 796 }
797 797
798 if (skb == NULL) { 798 if (skb == NULL) {
799 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers. \n",dev->name) ; 799 printk(KERN_WARNING "%s: Not enough memory to copy packet to upper layers.\n",dev->name) ;
800 dev->stats.rx_dropped++; 800 dev->stats.rx_dropped++;
801 /* Update counters even though we don't transfer the frame */ 801 /* Update counters even though we don't transfer the frame */
802 olympic_priv->rx_ring_last_received += i ; 802 olympic_priv->rx_ring_last_received += i ;
@@ -1101,7 +1101,7 @@ static int olympic_close(struct net_device *dev)
1101 } 1101 }
1102 1102
1103 if (t == 0) { 1103 if (t == 0) {
1104 printk(KERN_WARNING "%s: SRB timed out. May not be fatal. \n",dev->name) ; 1104 printk(KERN_WARNING "%s: SRB timed out. May not be fatal.\n",dev->name);
1105 } 1105 }
1106 olympic_priv->srb_queued=0; 1106 olympic_priv->srb_queued=0;
1107 } 1107 }
@@ -1139,7 +1139,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
1139 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ; 1139 u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
1140 u8 options = 0; 1140 u8 options = 0;
1141 u8 __iomem *srb; 1141 u8 __iomem *srb;
1142 struct dev_mc_list *dmi; 1142 struct netdev_hw_addr *ha;
1143 unsigned char dev_mc_address[4] ; 1143 unsigned char dev_mc_address[4] ;
1144 1144
1145 writel(olympic_priv->srb,olympic_mmio+LAPA); 1145 writel(olympic_priv->srb,olympic_mmio+LAPA);
@@ -1177,11 +1177,11 @@ static void olympic_set_rx_mode(struct net_device *dev)
1177 1177
1178 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ; 1178 dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
1179 1179
1180 netdev_for_each_mc_addr(dmi, dev) { 1180 netdev_for_each_mc_addr(ha, dev) {
1181 dev_mc_address[0] |= dmi->dmi_addr[2] ; 1181 dev_mc_address[0] |= ha->addr[2];
1182 dev_mc_address[1] |= dmi->dmi_addr[3] ; 1182 dev_mc_address[1] |= ha->addr[3];
1183 dev_mc_address[2] |= dmi->dmi_addr[4] ; 1183 dev_mc_address[2] |= ha->addr[4];
1184 dev_mc_address[3] |= dmi->dmi_addr[5] ; 1184 dev_mc_address[3] |= ha->addr[5];
1185 } 1185 }
1186 1186
1187 writeb(SRB_SET_FUNC_ADDRESS,srb+0); 1187 writeb(SRB_SET_FUNC_ADDRESS,srb+0);
@@ -1239,7 +1239,7 @@ static void olympic_srb_bh(struct net_device *dev)
1239 case 0x00: 1239 case 0x00:
1240 break ; 1240 break ;
1241 case 0x01: 1241 case 0x01:
1242 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; 1242 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1243 break ; 1243 break ;
1244 case 0x04: 1244 case 0x04:
1245 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name); 1245 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name);
@@ -1266,13 +1266,13 @@ static void olympic_srb_bh(struct net_device *dev)
1266 case 0x00: 1266 case 0x00:
1267 break ; 1267 break ;
1268 case 0x01: 1268 case 0x01:
1269 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; 1269 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1270 break ; 1270 break ;
1271 case 0x04: 1271 case 0x04:
1272 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 1272 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
1273 break ; 1273 break ;
1274 case 0x39: /* Must deal with this if individual multicast addresses used */ 1274 case 0x39: /* Must deal with this if individual multicast addresses used */
1275 printk(KERN_INFO "%s: Group address not found \n",dev->name); 1275 printk(KERN_INFO "%s: Group address not found\n",dev->name);
1276 break ; 1276 break ;
1277 default: 1277 default:
1278 break ; 1278 break ;
@@ -1287,10 +1287,10 @@ static void olympic_srb_bh(struct net_device *dev)
1287 switch (readb(srb+2)) { 1287 switch (readb(srb+2)) {
1288 case 0x00: 1288 case 0x00:
1289 if (olympic_priv->olympic_message_level) 1289 if (olympic_priv->olympic_message_level)
1290 printk(KERN_INFO "%s: Functional Address Mask Set \n",dev->name) ; 1290 printk(KERN_INFO "%s: Functional Address Mask Set\n",dev->name);
1291 break ; 1291 break ;
1292 case 0x01: 1292 case 0x01:
1293 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; 1293 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1294 break ; 1294 break ;
1295 case 0x04: 1295 case 0x04:
1296 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 1296 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1310,7 +1310,7 @@ static void olympic_srb_bh(struct net_device *dev)
1310 printk(KERN_INFO "%s: Read Log issued\n",dev->name) ; 1310 printk(KERN_INFO "%s: Read Log issued\n",dev->name) ;
1311 break ; 1311 break ;
1312 case 0x01: 1312 case 0x01:
1313 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; 1313 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1314 break ; 1314 break ;
1315 case 0x04: 1315 case 0x04:
1316 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 1316 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1328,7 +1328,7 @@ static void olympic_srb_bh(struct net_device *dev)
1328 printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ; 1328 printk(KERN_INFO "%s: Read Source Routing Counters issued\n",dev->name) ;
1329 break ; 1329 break ;
1330 case 0x01: 1330 case 0x01:
1331 printk(KERN_WARNING "%s: Unrecognized srb command \n",dev->name) ; 1331 printk(KERN_WARNING "%s: Unrecognized srb command\n",dev->name);
1332 break ; 1332 break ;
1333 case 0x04: 1333 case 0x04:
1334 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ; 1334 printk(KERN_WARNING "%s: Adapter must be open for this operation, doh!!\n",dev->name) ;
@@ -1404,7 +1404,7 @@ static void olympic_arb_cmd(struct net_device *dev)
1404 printk("Loc %d = %02x\n",i,readb(frame_data + i)); 1404 printk("Loc %d = %02x\n",i,readb(frame_data + i));
1405 } 1405 }
1406 1406
1407 printk("next %04x, fs %02x, len %04x \n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length))); 1407 printk("next %04x, fs %02x, len %04x\n",readw(buf_ptr+offsetof(struct mac_receive_buffer,next)), readb(buf_ptr+offsetof(struct mac_receive_buffer,frame_status)), readw(buf_ptr+offsetof(struct mac_receive_buffer,buffer_length)));
1408} 1408}
1409#endif 1409#endif
1410 mac_frame = dev_alloc_skb(frame_len) ; 1410 mac_frame = dev_alloc_skb(frame_len) ;
@@ -1426,7 +1426,7 @@ static void olympic_arb_cmd(struct net_device *dev)
1426 1426
1427 if (olympic_priv->olympic_network_monitor) { 1427 if (olympic_priv->olympic_network_monitor) {
1428 struct trh_hdr *mac_hdr; 1428 struct trh_hdr *mac_hdr;
1429 printk(KERN_WARNING "%s: Received MAC Frame, details: \n",dev->name); 1429 printk(KERN_WARNING "%s: Received MAC Frame, details:\n",dev->name);
1430 mac_hdr = tr_hdr(mac_frame); 1430 mac_hdr = tr_hdr(mac_frame);
1431 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n", 1431 printk(KERN_WARNING "%s: MAC Frame Dest. Addr: %pM\n",
1432 dev->name, mac_hdr->daddr); 1432 dev->name, mac_hdr->daddr);
@@ -1489,20 +1489,20 @@ drop_frame:
1489 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL); 1489 writel(readl(olympic_mmio+BCTL)&~(3<<13),olympic_mmio+BCTL);
1490 netif_stop_queue(dev); 1490 netif_stop_queue(dev);
1491 olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ; 1491 olympic_priv->srb = readw(olympic_priv->olympic_lap + LAPWWO) ;
1492 printk(KERN_WARNING "%s: Adapter has been closed \n", dev->name) ; 1492 printk(KERN_WARNING "%s: Adapter has been closed\n", dev->name);
1493 } /* If serious error */ 1493 } /* If serious error */
1494 1494
1495 if (olympic_priv->olympic_message_level) { 1495 if (olympic_priv->olympic_message_level) {
1496 if (lan_status_diff & LSC_SIG_LOSS) 1496 if (lan_status_diff & LSC_SIG_LOSS)
1497 printk(KERN_WARNING "%s: No receive signal detected \n", dev->name) ; 1497 printk(KERN_WARNING "%s: No receive signal detected\n", dev->name);
1498 if (lan_status_diff & LSC_HARD_ERR) 1498 if (lan_status_diff & LSC_HARD_ERR)
1499 printk(KERN_INFO "%s: Beaconing \n",dev->name); 1499 printk(KERN_INFO "%s: Beaconing\n",dev->name);
1500 if (lan_status_diff & LSC_SOFT_ERR) 1500 if (lan_status_diff & LSC_SOFT_ERR)
1501 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame \n",dev->name); 1501 printk(KERN_WARNING "%s: Adapter transmitted Soft Error Report Mac Frame\n",dev->name);
1502 if (lan_status_diff & LSC_TRAN_BCN) 1502 if (lan_status_diff & LSC_TRAN_BCN)
 1503 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name); 1503 printk(KERN_INFO "%s: We are transmitting the beacon, aaah\n",dev->name);
1504 if (lan_status_diff & LSC_SS) 1504 if (lan_status_diff & LSC_SS)
1505 printk(KERN_INFO "%s: Single Station on the ring \n", dev->name); 1505 printk(KERN_INFO "%s: Single Station on the ring\n", dev->name);
1506 if (lan_status_diff & LSC_RING_REC) 1506 if (lan_status_diff & LSC_RING_REC)
1507 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name); 1507 printk(KERN_INFO "%s: Ring recovery ongoing\n",dev->name);
1508 if (lan_status_diff & LSC_FDX_MODE) 1508 if (lan_status_diff & LSC_FDX_MODE)
@@ -1512,7 +1512,7 @@ drop_frame:
1512 if (lan_status_diff & LSC_CO) { 1512 if (lan_status_diff & LSC_CO) {
1513 1513
1514 if (olympic_priv->olympic_message_level) 1514 if (olympic_priv->olympic_message_level)
1515 printk(KERN_INFO "%s: Counter Overflow \n", dev->name); 1515 printk(KERN_INFO "%s: Counter Overflow\n", dev->name);
1516 1516
1517 /* Issue READ.LOG command */ 1517 /* Issue READ.LOG command */
1518 1518
@@ -1551,7 +1551,7 @@ drop_frame:
1551 1551
1552 } /* Lan.change.status */ 1552 } /* Lan.change.status */
1553 else 1553 else
1554 printk(KERN_WARNING "%s: Unknown arb command \n", dev->name); 1554 printk(KERN_WARNING "%s: Unknown arb command\n", dev->name);
1555} 1555}
1556 1556
1557static void olympic_asb_bh(struct net_device *dev) 1557static void olympic_asb_bh(struct net_device *dev)
@@ -1578,10 +1578,10 @@ static void olympic_asb_bh(struct net_device *dev)
1578 if (olympic_priv->asb_queued == 2) { 1578 if (olympic_priv->asb_queued == 2) {
1579 switch (readb(asb_block+2)) { 1579 switch (readb(asb_block+2)) {
1580 case 0x01: 1580 case 0x01:
1581 printk(KERN_WARNING "%s: Unrecognized command code \n", dev->name); 1581 printk(KERN_WARNING "%s: Unrecognized command code\n", dev->name);
1582 break ; 1582 break ;
1583 case 0x26: 1583 case 0x26:
1584 printk(KERN_WARNING "%s: Unrecognized buffer address \n", dev->name); 1584 printk(KERN_WARNING "%s: Unrecognized buffer address\n", dev->name);
1585 break ; 1585 break ;
1586 case 0xFF: 1586 case 0xFF:
1587 /* Valid response, everything should be ok again */ 1587 /* Valid response, everything should be ok again */
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index 8b508c922410..c169fd05dde0 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1211,17 +1211,17 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
1211 } 1211 }
1212 else 1212 else
1213 { 1213 {
1214 struct dev_mc_list *mclist; 1214 struct netdev_hw_addr *ha;
1215 1215
1216 netdev_for_each_mc_addr(mclist, dev) { 1216 netdev_for_each_mc_addr(ha, dev) {
1217 ((char *)(&tp->ocpl.FunctAddr))[0] |= 1217 ((char *)(&tp->ocpl.FunctAddr))[0] |=
1218 mclist->dmi_addr[2]; 1218 ha->addr[2];
1219 ((char *)(&tp->ocpl.FunctAddr))[1] |= 1219 ((char *)(&tp->ocpl.FunctAddr))[1] |=
1220 mclist->dmi_addr[3]; 1220 ha->addr[3];
1221 ((char *)(&tp->ocpl.FunctAddr))[2] |= 1221 ((char *)(&tp->ocpl.FunctAddr))[2] |=
1222 mclist->dmi_addr[4]; 1222 ha->addr[4];
1223 ((char *)(&tp->ocpl.FunctAddr))[3] |= 1223 ((char *)(&tp->ocpl.FunctAddr))[3] |=
1224 mclist->dmi_addr[5]; 1224 ha->addr[5];
1225 } 1225 }
1226 } 1226 }
1227 tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR); 1227 tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
@@ -1390,7 +1390,7 @@ static int tms380tr_bringup_diags(struct net_device *dev)
1390 Status &= STS_MASK; 1390 Status &= STS_MASK;
1391 1391
1392 if(tms380tr_debug > 3) 1392 if(tms380tr_debug > 3)
1393 printk(KERN_DEBUG " %04X \n", Status); 1393 printk(KERN_DEBUG " %04X\n", Status);
1394 /* BUD successfully completed */ 1394 /* BUD successfully completed */
1395 if(Status == STS_INITIALIZE) 1395 if(Status == STS_INITIALIZE)
1396 return (1); 1396 return (1);
@@ -1846,7 +1846,7 @@ static void tms380tr_chk_irq(struct net_device *dev)
1846 break; 1846 break;
1847 1847
1848 case DMA_WRITE_ABORT: 1848 case DMA_WRITE_ABORT:
1849 printk(KERN_INFO "%s: DMA write operation aborted: \n", 1849 printk(KERN_INFO "%s: DMA write operation aborted:\n",
1850 dev->name); 1850 dev->name);
1851 switch (AdapterCheckBlock[1]) 1851 switch (AdapterCheckBlock[1])
1852 { 1852 {
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 5b1fbb3c3b51..a03730bd1da5 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -263,7 +263,7 @@ static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
263 return; 263 return;
264 udelay(10); 264 udelay(10);
265 } 265 }
266 printk(KERN_ERR "%s function time out \n", __func__); 266 printk(KERN_ERR "%s function time out\n", __func__);
267} 267}
268 268
269static int mii_speed(struct mii_if_info *mii) 269static int mii_speed(struct mii_if_info *mii)
@@ -704,8 +704,8 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
704 704
705 if (i == 0) { 705 if (i == 0) {
706 data->txring[tx].buf0 = dma_map_single(NULL, skb->data, 706 data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
707 skb->len - skb->data_len, DMA_TO_DEVICE); 707 skb_headlen(skb), DMA_TO_DEVICE);
708 data->txring[tx].len = skb->len - skb->data_len; 708 data->txring[tx].len = skb_headlen(skb);
709 misc |= TSI108_TX_SOF; 709 misc |= TSI108_TX_SOF;
710 } else { 710 } else {
711 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; 711 skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
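skb_headlen() is defined as skb->len minus skb->data_len, i.e. the bytes that live in the skb's linear buffer rather than in paged fragments, so the hunk above is a pure readability cleanup. A sketch of the mapping step, assuming a valid struct device pointer rather than the NULL this driver passes:

	#include <linux/skbuff.h>
	#include <linux/dma-mapping.h>

	/* Sketch: map only the linear head of the skb for transmit DMA;
	 * paged fragments are mapped separately, one descriptor each. */
	static dma_addr_t map_linear_part(struct device *dev, struct sk_buff *skb)
	{
		/* skb_headlen(skb) == skb->len - skb->data_len */
		return dma_map_single(dev, skb->data, skb_headlen(skb),
				      DMA_TO_DEVICE);
	}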
@@ -1056,7 +1056,7 @@ static void tsi108_stop_ethernet(struct net_device *dev)
1056 return; 1056 return;
1057 udelay(10); 1057 udelay(10);
1058 } 1058 }
1059 printk(KERN_ERR "%s function time out \n", __func__); 1059 printk(KERN_ERR "%s function time out\n", __func__);
1060} 1060}
1061 1061
1062static void tsi108_reset_ether(struct tsi108_prv_data * data) 1062static void tsi108_reset_ether(struct tsi108_prv_data * data)
@@ -1186,15 +1186,15 @@ static void tsi108_set_rx_mode(struct net_device *dev)
1186 1186
1187 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) { 1187 if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
1188 int i; 1188 int i;
1189 struct dev_mc_list *mc; 1189 struct netdev_hw_addr *ha;
1190 rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH; 1190 rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
1191 1191
1192 memset(data->mc_hash, 0, sizeof(data->mc_hash)); 1192 memset(data->mc_hash, 0, sizeof(data->mc_hash));
1193 1193
1194 netdev_for_each_mc_addr(mc, dev) { 1194 netdev_for_each_mc_addr(ha, dev) {
1195 u32 hash, crc; 1195 u32 hash, crc;
1196 1196
1197 crc = ether_crc(6, mc->dmi_addr); 1197 crc = ether_crc(6, ha->addr);
1198 hash = crc >> 23; 1198 hash = crc >> 23;
1199 __set_bit(hash, &data->mc_hash[0]); 1199 __set_bit(hash, &data->mc_hash[0]);
1200 } 1200 }
@@ -1233,7 +1233,7 @@ static void tsi108_init_phy(struct net_device *dev)
1233 udelay(10); 1233 udelay(10);
1234 } 1234 }
1235 if (i == 0) 1235 if (i == 0)
1236 printk(KERN_ERR "%s function time out \n", __func__); 1236 printk(KERN_ERR "%s function time out\n", __func__);
1237 1237
1238 if (data->phy_type == TSI108_PHY_BCM54XX) { 1238 if (data->phy_type == TSI108_PHY_BCM54XX) {
1239 tsi108_write_mii(data, 0x09, 0x0300); 1239 tsi108_write_mii(data, 0x09, 0x0300);
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 19cafc2b418d..9c0f29ce8ba7 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -671,15 +671,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
671{ 671{
672 struct de_private *de = netdev_priv(dev); 672 struct de_private *de = netdev_priv(dev);
673 u16 hash_table[32]; 673 u16 hash_table[32];
674 struct dev_mc_list *mclist; 674 struct netdev_hw_addr *ha;
675 int i; 675 int i;
676 u16 *eaddrs; 676 u16 *eaddrs;
677 677
678 memset(hash_table, 0, sizeof(hash_table)); 678 memset(hash_table, 0, sizeof(hash_table));
679 set_bit_le(255, hash_table); /* Broadcast entry */ 679 set_bit_le(255, hash_table); /* Broadcast entry */
680 /* This should work on big-endian machines as well. */ 680 /* This should work on big-endian machines as well. */
681 netdev_for_each_mc_addr(mclist, dev) { 681 netdev_for_each_mc_addr(ha, dev) {
682 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; 682 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
683 683
684 set_bit_le(index, hash_table); 684 set_bit_le(index, hash_table);
685 } 685 }
@@ -700,13 +700,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
700static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) 700static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
701{ 701{
702 struct de_private *de = netdev_priv(dev); 702 struct de_private *de = netdev_priv(dev);
703 struct dev_mc_list *mclist; 703 struct netdev_hw_addr *ha;
704 u16 *eaddrs; 704 u16 *eaddrs;
705 705
706 /* We have <= 14 addresses so we can use the wonderful 706 /* We have <= 14 addresses so we can use the wonderful
707 16 address perfect filtering of the Tulip. */ 707 16 address perfect filtering of the Tulip. */
708 netdev_for_each_mc_addr(mclist, dev) { 708 netdev_for_each_mc_addr(ha, dev) {
709 eaddrs = (u16 *)mclist->dmi_addr; 709 eaddrs = (u16 *) ha->addr;
710 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 710 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
711 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 711 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
712 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 712 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 09b57193a16a..d818456f4713 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1951,7 +1951,7 @@ static void
1951SetMulticastFilter(struct net_device *dev) 1951SetMulticastFilter(struct net_device *dev)
1952{ 1952{
1953 struct de4x5_private *lp = netdev_priv(dev); 1953 struct de4x5_private *lp = netdev_priv(dev);
1954 struct dev_mc_list *dmi; 1954 struct netdev_hw_addr *ha;
1955 u_long iobase = dev->base_addr; 1955 u_long iobase = dev->base_addr;
1956 int i, bit, byte; 1956 int i, bit, byte;
1957 u16 hashcode; 1957 u16 hashcode;
@@ -1966,8 +1966,8 @@ SetMulticastFilter(struct net_device *dev)
1966 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) { 1966 if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
1967 omr |= OMR_PM; /* Pass all multicasts */ 1967 omr |= OMR_PM; /* Pass all multicasts */
1968 } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */ 1968 } else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
1969 netdev_for_each_mc_addr(dmi, dev) { 1969 netdev_for_each_mc_addr(ha, dev) {
1970 addrs = dmi->dmi_addr; 1970 addrs = ha->addr;
1971 if ((*addrs & 0x01) == 1) { /* multicast address? */ 1971 if ((*addrs & 0x01) == 1) { /* multicast address? */
1972 crc = ether_crc_le(ETH_ALEN, addrs); 1972 crc = ether_crc_le(ETH_ALEN, addrs);
1973 hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */ 1973 hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
@@ -1983,8 +1983,8 @@ SetMulticastFilter(struct net_device *dev)
1983 } 1983 }
1984 } 1984 }
1985 } else { /* Perfect filtering */ 1985 } else { /* Perfect filtering */
1986 netdev_for_each_mc_addr(dmi, dev) { 1986 netdev_for_each_mc_addr(ha, dev) {
1987 addrs = dmi->dmi_addr; 1987 addrs = ha->addr;
1988 for (i=0; i<ETH_ALEN; i++) { 1988 for (i=0; i<ETH_ALEN; i++) {
1989 *(pa + (i&1)) = *addrs++; 1989 *(pa + (i&1)) = *addrs++;
1990 if (i & 0x01) pa += 4; 1990 if (i & 0x01) pa += 4;
@@ -5077,7 +5077,7 @@ mii_get_phy(struct net_device *dev)
5077 lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */ 5077 lp->phy[k].spd.value = GENERIC_VALUE; /* TX & T4, H/F Duplex */
5078 lp->mii_cnt++; 5078 lp->mii_cnt++;
5079 lp->active++; 5079 lp->active++;
5080 printk("%s: Using generic MII device control. If the board doesn't operate, \nplease mail the following dump to the author:\n", dev->name); 5080 printk("%s: Using generic MII device control. If the board doesn't operate,\nplease mail the following dump to the author:\n", dev->name);
5081 j = de4x5_debug; 5081 j = de4x5_debug;
5082 de4x5_debug |= DEBUG_MII; 5082 de4x5_debug |= DEBUG_MII;
5083 de4x5_dbg_mii(dev, k); 5083 de4x5_dbg_mii(dev, k);
@@ -5337,7 +5337,7 @@ de4x5_dbg_open(struct net_device *dev)
5337 } 5337 }
5338 } 5338 }
5339 printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf)); 5339 printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
5340 printk("Ring size: \nRX: %d\nTX: %d\n", 5340 printk("Ring size:\nRX: %d\nTX: %d\n",
5341 (short)lp->rxRingSize, 5341 (short)lp->rxRingSize,
5342 (short)lp->txRingSize); 5342 (short)lp->txRingSize);
5343 } 5343 }
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 9568156dea98..7278ecb823cb 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -1453,7 +1453,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1453 1453
1454static void dm9132_id_table(struct DEVICE *dev) 1454static void dm9132_id_table(struct DEVICE *dev)
1455{ 1455{
1456 struct dev_mc_list *mcptr; 1456 struct netdev_hw_addr *ha;
1457 u16 * addrptr; 1457 u16 * addrptr;
1458 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */ 1458 unsigned long ioaddr = dev->base_addr+0xc0; /* ID Table */
1459 u32 hash_val; 1459 u32 hash_val;
@@ -1477,8 +1477,8 @@ static void dm9132_id_table(struct DEVICE *dev)
1477 hash_table[3] = 0x8000; 1477 hash_table[3] = 0x8000;
1478 1478
1479 /* the multicast address in Hash Table : 64 bits */ 1479 /* the multicast address in Hash Table : 64 bits */
1480 netdev_for_each_mc_addr(mcptr, dev) { 1480 netdev_for_each_mc_addr(ha, dev) {
1481 hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f; 1481 hash_val = cal_CRC((char *) ha->addr, 6, 0) & 0x3f;
1482 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16); 1482 hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
1483 } 1483 }
1484 1484
@@ -1496,7 +1496,7 @@ static void dm9132_id_table(struct DEVICE *dev)
1496static void send_filter_frame(struct DEVICE *dev) 1496static void send_filter_frame(struct DEVICE *dev)
1497{ 1497{
1498 struct dmfe_board_info *db = netdev_priv(dev); 1498 struct dmfe_board_info *db = netdev_priv(dev);
1499 struct dev_mc_list *mcptr; 1499 struct netdev_hw_addr *ha;
1500 struct tx_desc *txptr; 1500 struct tx_desc *txptr;
1501 u16 * addrptr; 1501 u16 * addrptr;
1502 u32 * suptr; 1502 u32 * suptr;
@@ -1519,8 +1519,8 @@ static void send_filter_frame(struct DEVICE *dev)
1519 *suptr++ = 0xffff; 1519 *suptr++ = 0xffff;
1520 1520
1521 /* fit the multicast address */ 1521 /* fit the multicast address */
1522 netdev_for_each_mc_addr(mcptr, dev) { 1522 netdev_for_each_mc_addr(ha, dev) {
1523 addrptr = (u16 *) mcptr->dmi_addr; 1523 addrptr = (u16 *) ha->addr;
1524 *suptr++ = addrptr[0]; 1524 *suptr++ = addrptr[0];
1525 *suptr++ = addrptr[1]; 1525 *suptr++ = addrptr[1];
1526 *suptr++ = addrptr[2]; 1526 *suptr++ = addrptr[2];
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 3810db9dc2de..22e766e90063 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -991,15 +991,15 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
991{ 991{
992 struct tulip_private *tp = netdev_priv(dev); 992 struct tulip_private *tp = netdev_priv(dev);
993 u16 hash_table[32]; 993 u16 hash_table[32];
994 struct dev_mc_list *mclist; 994 struct netdev_hw_addr *ha;
995 int i; 995 int i;
996 u16 *eaddrs; 996 u16 *eaddrs;
997 997
998 memset(hash_table, 0, sizeof(hash_table)); 998 memset(hash_table, 0, sizeof(hash_table));
999 set_bit_le(255, hash_table); /* Broadcast entry */ 999 set_bit_le(255, hash_table); /* Broadcast entry */
1000 /* This should work on big-endian machines as well. */ 1000 /* This should work on big-endian machines as well. */
1001 netdev_for_each_mc_addr(mclist, dev) { 1001 netdev_for_each_mc_addr(ha, dev) {
1002 int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff; 1002 int index = ether_crc_le(ETH_ALEN, ha->addr) & 0x1ff;
1003 1003
1004 set_bit_le(index, hash_table); 1004 set_bit_le(index, hash_table);
1005 } 1005 }
@@ -1019,13 +1019,13 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
1019static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev) 1019static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
1020{ 1020{
1021 struct tulip_private *tp = netdev_priv(dev); 1021 struct tulip_private *tp = netdev_priv(dev);
1022 struct dev_mc_list *mclist; 1022 struct netdev_hw_addr *ha;
1023 u16 *eaddrs; 1023 u16 *eaddrs;
1024 1024
1025 /* We have <= 14 addresses so we can use the wonderful 1025 /* We have <= 14 addresses so we can use the wonderful
1026 16 address perfect filtering of the Tulip. */ 1026 16 address perfect filtering of the Tulip. */
1027 netdev_for_each_mc_addr(mclist, dev) { 1027 netdev_for_each_mc_addr(ha, dev) {
1028 eaddrs = (u16 *)mclist->dmi_addr; 1028 eaddrs = (u16 *) ha->addr;
1029 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1029 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1030 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1030 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
1031 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++; 1031 *setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
@@ -1062,7 +1062,7 @@ static void set_rx_mode(struct net_device *dev)
1062 } else if (tp->flags & MC_HASH_ONLY) { 1062 } else if (tp->flags & MC_HASH_ONLY) {
1063 /* Some work-alikes have only a 64-entry hash filter table. */ 1063 /* Some work-alikes have only a 64-entry hash filter table. */
1064 /* Should verify correctness on big-endian/__powerpc__ */ 1064 /* Should verify correctness on big-endian/__powerpc__ */
1065 struct dev_mc_list *mclist; 1065 struct netdev_hw_addr *ha;
1066 if (netdev_mc_count(dev) > 64) { 1066 if (netdev_mc_count(dev) > 64) {
1067 /* Arbitrary non-effective limit. */ 1067 /* Arbitrary non-effective limit. */
1068 tp->csr6 |= AcceptAllMulticast; 1068 tp->csr6 |= AcceptAllMulticast;
@@ -1070,18 +1070,21 @@ static void set_rx_mode(struct net_device *dev)
1070 } else { 1070 } else {
1071 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */ 1071 u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
1072 int filterbit; 1072 int filterbit;
1073 netdev_for_each_mc_addr(mclist, dev) { 1073 netdev_for_each_mc_addr(ha, dev) {
1074 if (tp->flags & COMET_MAC_ADDR) 1074 if (tp->flags & COMET_MAC_ADDR)
1075 filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr); 1075 filterbit = ether_crc_le(ETH_ALEN,
1076 ha->addr);
1076 else 1077 else
1077 filterbit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 1078 filterbit = ether_crc(ETH_ALEN,
1079 ha->addr) >> 26;
1078 filterbit &= 0x3f; 1080 filterbit &= 0x3f;
1079 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 1081 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
1080 if (tulip_debug > 2) 1082 if (tulip_debug > 2)
1081 dev_info(&dev->dev, 1083 dev_info(&dev->dev,
1082 "Added filter for %pM %08x bit %d\n", 1084 "Added filter for %pM %08x bit %d\n",
1083 mclist->dmi_addr, 1085 ha->addr,
1084 ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit); 1086 ether_crc(ETH_ALEN, ha->addr),
1087 filterbit);
1085 } 1088 }
1086 if (mc_filter[0] == tp->mc_filter[0] && 1089 if (mc_filter[0] == tp->mc_filter[0] &&
1087 mc_filter[1] == tp->mc_filter[1]) 1090 mc_filter[1] == tp->mc_filter[1])
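Most Ethernet drivers touched in this series derive the filter bit from a CRC-32 of the address, so only the iterator and field names change. A sketch of the common 64-bit hash idiom; the USB parts further down store the same 64 bits as an 8-byte array, indexing with crc >> 3 and 1 << (crc & 7):

	#include <linux/crc32.h>
	#include <linux/etherdevice.h>
	#include <linux/netdevice.h>

	/* Sketch: classic 64-entry multicast hash filter.  The top six
	 * bits of the big-endian CRC pick one of 64 bits, split across
	 * two 32-bit registers. */
	static void build_mc_hash(struct net_device *dev, u32 mc_filter[2])
	{
		struct netdev_hw_addr *ha;

		mc_filter[0] = mc_filter[1] = 0;
		netdev_for_each_mc_addr(ha, dev) {
			int bit = ether_crc(ETH_ALEN, ha->addr) >> 26;

			mc_filter[bit >> 5] |= 1 << (bit & 31);
		}
	}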
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index a589dd34891e..c7f972852921 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -1393,7 +1393,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
1393static void send_filter_frame(struct net_device *dev, int mc_cnt) 1393static void send_filter_frame(struct net_device *dev, int mc_cnt)
1394{ 1394{
1395 struct uli526x_board_info *db = netdev_priv(dev); 1395 struct uli526x_board_info *db = netdev_priv(dev);
1396 struct dev_mc_list *mcptr; 1396 struct netdev_hw_addr *ha;
1397 struct tx_desc *txptr; 1397 struct tx_desc *txptr;
1398 u16 * addrptr; 1398 u16 * addrptr;
1399 u32 * suptr; 1399 u32 * suptr;
@@ -1416,8 +1416,8 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
1416 *suptr++ = 0xffff << FLT_SHIFT; 1416 *suptr++ = 0xffff << FLT_SHIFT;
1417 1417
1418 /* fit the multicast address */ 1418 /* fit the multicast address */
1419 netdev_for_each_mc_addr(mcptr, dev) { 1419 netdev_for_each_mc_addr(ha, dev) {
1420 addrptr = (u16 *) mcptr->dmi_addr; 1420 addrptr = (u16 *) ha->addr;
1421 *suptr++ = addrptr[0] << FLT_SHIFT; 1421 *suptr++ = addrptr[0] << FLT_SHIFT;
1422 *suptr++ = addrptr[1] << FLT_SHIFT; 1422 *suptr++ = addrptr[1] << FLT_SHIFT;
1423 *suptr++ = addrptr[2] << FLT_SHIFT; 1423 *suptr++ = addrptr[2] << FLT_SHIFT;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 98dbf6cc1d68..18c8cedf4cac 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1366,13 +1366,15 @@ static u32 __set_rx_mode(struct net_device *dev)
1366 memset(mc_filter, 0xff, sizeof(mc_filter)); 1366 memset(mc_filter, 0xff, sizeof(mc_filter));
1367 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1367 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1368 } else { 1368 } else {
1369 struct dev_mc_list *mclist; 1369 struct netdev_hw_addr *ha;
1370 1370
1371 memset(mc_filter, 0, sizeof(mc_filter)); 1371 memset(mc_filter, 0, sizeof(mc_filter));
1372 netdev_for_each_mc_addr(mclist, dev) { 1372 netdev_for_each_mc_addr(ha, dev) {
1373 int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F; 1373 int filbit;
1374 filterbit &= 0x3f; 1374
1375 mc_filter[filterbit >> 5] |= 1 << (filterbit & 31); 1375 filbit = (ether_crc(ETH_ALEN, ha->addr) >> 26) ^ 0x3F;
1376 filbit &= 0x3f;
1377 mc_filter[filbit >> 5] |= 1 << (filbit & 31);
1376 } 1378 }
1377 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys; 1379 rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
1378 } 1380 }
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index acfeeb980562..a439e93be22d 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -350,9 +350,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
350 350
351#ifdef DEBUG 351#ifdef DEBUG
352 print_binary(status); 352 print_binary(status);
353 printk("tx status 0x%08x 0x%08x \n", 353 printk("tx status 0x%08x 0x%08x\n",
354 card->tx_buffer[0], card->tx_buffer[4]); 354 card->tx_buffer[0], card->tx_buffer[4]);
355 printk("rx status 0x%08x 0x%08x \n", 355 printk("rx status 0x%08x 0x%08x\n",
356 card->rx_buffer[0], card->rx_buffer[4]); 356 card->rx_buffer[0], card->rx_buffer[4]);
357#endif 357#endif
358 /* Handle shared irq and hotplug */ 358 /* Handle shared irq and hotplug */
@@ -462,7 +462,7 @@ static int xircom_open(struct net_device *dev)
462 struct xircom_private *xp = netdev_priv(dev); 462 struct xircom_private *xp = netdev_priv(dev);
463 int retval; 463 int retval;
464 enter("xircom_open"); 464 enter("xircom_open");
465 pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n", 465 pr_info("xircom cardbus adaptor found, registering as %s, using irq %i\n",
466 dev->name, dev->irq); 466 dev->name, dev->irq);
467 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev); 467 retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
468 if (retval) { 468 if (retval) {
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 43265207d463..20a17938c62b 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -868,8 +868,8 @@ static void tun_sock_write_space(struct sock *sk)
868 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) 868 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
869 return; 869 return;
870 870
871 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 871 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
872 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT | 872 wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT |
873 POLLWRNORM | POLLWRBAND); 873 POLLWRNORM | POLLWRBAND);
874 874
875 tun = tun_sk(sk)->tun; 875 tun = tun_sk(sk)->tun;
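The tun hunk belongs to the same cycle's replacement of direct sk->sk_sleep dereferences with the sk_sleep() accessor, which hides where the socket's wait queue is stored. A sketch of the converted wakeup path:

	#include <net/sock.h>
	#include <linux/poll.h>

	/* Sketch: wake pollers waiting for write space; sk_sleep(sk)
	 * replaces direct use of the old sk->sk_sleep field. */
	static void wake_write_space(struct sock *sk)
	{
		if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible_sync_poll(sk_sleep(sk),
					POLLOUT | POLLWRNORM | POLLWRBAND);
	}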
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 98d818daa77e..b94c4cce93c2 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -920,11 +920,11 @@ typhoon_set_rx_mode(struct net_device *dev)
920 /* Too many to match, or accept all multicasts. */ 920 /* Too many to match, or accept all multicasts. */
921 filter |= TYPHOON_RX_FILTER_ALL_MCAST; 921 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
922 } else if (!netdev_mc_empty(dev)) { 922 } else if (!netdev_mc_empty(dev)) {
923 struct dev_mc_list *mclist; 923 struct netdev_hw_addr *ha;
924 924
925 memset(mc_filter, 0, sizeof(mc_filter)); 925 memset(mc_filter, 0, sizeof(mc_filter));
926 netdev_for_each_mc_addr(mclist, dev) { 926 netdev_for_each_mc_addr(ha, dev) {
927 int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f; 927 int bit = ether_crc(ETH_ALEN, ha->addr) & 0x3f;
928 mc_filter[bit >> 5] |= 1 << (bit & 0x1f); 928 mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
929 } 929 }
930 930
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 1b0aef37e495..081f76bff341 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -1999,7 +1999,7 @@ static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
1999static void ucc_geth_set_multi(struct net_device *dev) 1999static void ucc_geth_set_multi(struct net_device *dev)
2000{ 2000{
2001 struct ucc_geth_private *ugeth; 2001 struct ucc_geth_private *ugeth;
2002 struct dev_mc_list *dmi; 2002 struct netdev_hw_addr *ha;
2003 struct ucc_fast __iomem *uf_regs; 2003 struct ucc_fast __iomem *uf_regs;
2004 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 2004 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
2005 2005
@@ -2028,16 +2028,16 @@ static void ucc_geth_set_multi(struct net_device *dev)
2028 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0); 2028 out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
2029 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0); 2029 out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
2030 2030
2031 netdev_for_each_mc_addr(dmi, dev) { 2031 netdev_for_each_mc_addr(ha, dev) {
2032 /* Only support group multicast for now. 2032 /* Only support group multicast for now.
2033 */ 2033 */
2034 if (!(dmi->dmi_addr[0] & 1)) 2034 if (!(ha->addr[0] & 1))
2035 continue; 2035 continue;
2036 2036
2037 /* Ask CPM to run CRC and set bit in 2037 /* Ask CPM to run CRC and set bit in
2038 * filter mask. 2038 * filter mask.
2039 */ 2039 */
2040 hw_add_addr_in_hash(ugeth, dmi->dmi_addr); 2040 hw_add_addr_in_hash(ugeth, ha->addr);
2041 } 2041 }
2042 } 2042 }
2043 } 2043 }
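ucc_geth only hashes group addresses, and the test above relies on the I/G bit: bit 0 of the first octet is set on every multicast (group) MAC address. An equivalent sketch using the standard helper:

	#include <linux/etherdevice.h>

	/* Sketch: ha->addr[0] & 1 tests the I/G bit; the generic helper
	 * performs the same check. */
	static bool addr_is_group(const u8 *addr)
	{
		return is_multicast_ether_addr(addr);	/* == (addr[0] & 1) */
	}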
@@ -3883,7 +3883,7 @@ static int ucc_geth_probe(struct of_device* ofdev, const struct of_device_id *ma
3883 } 3883 }
3884 3884
3885 if (netif_msg_probe(&debug)) 3885 if (netif_msg_probe(&debug))
3886 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d) \n", 3886 printk(KERN_INFO "ucc_geth: UCC%1d at 0x%8x (irq = %d)\n",
3887 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs, 3887 ug_info->uf_info.ucc_num + 1, ug_info->uf_info.regs,
3888 ug_info->uf_info.irq); 3888 ug_info->uf_info.irq);
3889 3889
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 35f56fc82803..8e7d2374558b 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -558,16 +558,14 @@ static void asix_set_multicast(struct net_device *net)
558 * for our 8 byte filter buffer 558 * for our 8 byte filter buffer
559 * to avoid allocating memory that 559 * to avoid allocating memory that
560 * is tricky to free later */ 560 * is tricky to free later */
561 struct dev_mc_list *mc_list; 561 struct netdev_hw_addr *ha;
562 u32 crc_bits; 562 u32 crc_bits;
563 563
564 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE); 564 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
565 565
566 /* Build the multicast hash filter. */ 566 /* Build the multicast hash filter. */
567 netdev_for_each_mc_addr(mc_list, net) { 567 netdev_for_each_mc_addr(ha, net) {
568 crc_bits = 568 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
569 ether_crc(ETH_ALEN,
570 mc_list->dmi_addr) >> 26;
571 data->multi_filter[crc_bits >> 3] |= 569 data->multi_filter[crc_bits >> 3] |=
572 1 << (crc_bits & 7); 570 1 << (crc_bits & 7);
573 } 571 }
@@ -794,16 +792,14 @@ static void ax88172_set_multicast(struct net_device *net)
794 * for our 8 byte filter buffer 792 * for our 8 byte filter buffer
795 * to avoid allocating memory that 793 * to avoid allocating memory that
796 * is tricky to free later */ 794 * is tricky to free later */
797 struct dev_mc_list *mc_list; 795 struct netdev_hw_addr *ha;
798 u32 crc_bits; 796 u32 crc_bits;
799 797
800 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE); 798 memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
801 799
802 /* Build the multicast hash filter. */ 800 /* Build the multicast hash filter. */
803 netdev_for_each_mc_addr(mc_list, net) { 801 netdev_for_each_mc_addr(ha, net) {
804 crc_bits = 802 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
805 ether_crc(ETH_ALEN,
806 mc_list->dmi_addr) >> 26;
807 data->multi_filter[crc_bits >> 3] |= 803 data->multi_filter[crc_bits >> 3] |=
808 1 << (crc_bits & 7); 804 1 << (crc_bits & 7);
809 } 805 }
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 602e123b2741..97687d335903 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -629,7 +629,7 @@ static void catc_multicast(unsigned char *addr, u8 *multicast)
629static void catc_set_multicast_list(struct net_device *netdev) 629static void catc_set_multicast_list(struct net_device *netdev)
630{ 630{
631 struct catc *catc = netdev_priv(netdev); 631 struct catc *catc = netdev_priv(netdev);
632 struct dev_mc_list *mc; 632 struct netdev_hw_addr *ha;
633 u8 broadcast[6]; 633 u8 broadcast[6];
634 u8 rx = RxEnable | RxPolarity | RxMultiCast; 634 u8 rx = RxEnable | RxPolarity | RxMultiCast;
635 635
@@ -647,8 +647,8 @@ static void catc_set_multicast_list(struct net_device *netdev)
647 if (netdev->flags & IFF_ALLMULTI) { 647 if (netdev->flags & IFF_ALLMULTI) {
648 memset(catc->multicast, 0xff, 64); 648 memset(catc->multicast, 0xff, 64);
649 } else { 649 } else {
650 netdev_for_each_mc_addr(mc, netdev) { 650 netdev_for_each_mc_addr(ha, netdev) {
651 u32 crc = ether_crc_le(6, mc->dmi_addr); 651 u32 crc = ether_crc_le(6, ha->addr);
652 if (!catc->is_f5u011) { 652 if (!catc->is_f5u011) {
653 catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); 653 catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
654 } else { 654 } else {
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 04b281002a76..291add255246 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -387,10 +387,10 @@ static void dm9601_set_multicast(struct net_device *net)
387 netdev_mc_count(net) > DM_MAX_MCAST) { 387 netdev_mc_count(net) > DM_MAX_MCAST) {
388 rx_ctl |= 0x04; 388 rx_ctl |= 0x04;
389 } else if (!netdev_mc_empty(net)) { 389 } else if (!netdev_mc_empty(net)) {
390 struct dev_mc_list *mc_list; 390 struct netdev_hw_addr *ha;
391 391
392 netdev_for_each_mc_addr(mc_list, net) { 392 netdev_for_each_mc_addr(ha, net) {
393 u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; 393 u32 crc = ether_crc(ETH_ALEN, ha->addr) >> 26;
394 hashes[crc >> 3] |= 1 << (crc & 0x7); 394 hashes[crc >> 3] |= 1 << (crc & 0x7);
395 } 395 }
396 } 396 }
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 9f24e3f871e1..834d8cd3005d 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -453,12 +453,12 @@ static void mcs7830_data_set_multicast(struct net_device *net)
453 * for our 8 byte filter buffer 453 * for our 8 byte filter buffer
454 * to avoid allocating memory that 454 * to avoid allocating memory that
455 * is tricky to free later */ 455 * is tricky to free later */
456 struct dev_mc_list *mc_list; 456 struct netdev_hw_addr *ha;
457 u32 crc_bits; 457 u32 crc_bits;
458 458
459 /* Build the multicast hash filter. */ 459 /* Build the multicast hash filter. */
460 netdev_for_each_mc_addr(mc_list, net) { 460 netdev_for_each_mc_addr(ha, net) {
461 crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26; 461 crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
462 data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); 462 data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
463 } 463 }
464 } 464 }
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 35b98b1b79e4..753ee6eb7edd 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -445,14 +445,14 @@ static void smsc75xx_set_multicast(struct net_device *netdev)
445 netif_dbg(dev, drv, dev->net, "receive all multicast enabled"); 445 netif_dbg(dev, drv, dev->net, "receive all multicast enabled");
446 pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF; 446 pdata->rfe_ctl |= RFE_CTL_AM | RFE_CTL_DPF;
447 } else if (!netdev_mc_empty(dev->net)) { 447 } else if (!netdev_mc_empty(dev->net)) {
448 struct dev_mc_list *mc_list; 448 struct netdev_hw_addr *ha;
449 449
450 netif_dbg(dev, drv, dev->net, "receive multicast hash filter"); 450 netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
451 451
452 pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF; 452 pdata->rfe_ctl |= RFE_CTL_MHF | RFE_CTL_DPF;
453 453
454 netdev_for_each_mc_addr(mc_list, netdev) { 454 netdev_for_each_mc_addr(ha, netdev) {
455 u32 bitnum = smsc75xx_hash(mc_list->dmi_addr); 455 u32 bitnum = smsc75xx_hash(ha->addr);
456 pdata->multicast_hash_table[bitnum / 32] |= 456 pdata->multicast_hash_table[bitnum / 32] |=
457 (1 << (bitnum % 32)); 457 (1 << (bitnum % 32));
458 } 458 }
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 3135af63d378..12a3c88c5282 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -385,13 +385,13 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
385 pdata->mac_cr |= MAC_CR_MCPAS_; 385 pdata->mac_cr |= MAC_CR_MCPAS_;
386 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_); 386 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
387 } else if (!netdev_mc_empty(dev->net)) { 387 } else if (!netdev_mc_empty(dev->net)) {
388 struct dev_mc_list *mc_list; 388 struct netdev_hw_addr *ha;
389 389
390 pdata->mac_cr |= MAC_CR_HPFILT_; 390 pdata->mac_cr |= MAC_CR_HPFILT_;
391 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_); 391 pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
392 392
393 netdev_for_each_mc_addr(mc_list, netdev) { 393 netdev_for_each_mc_addr(ha, netdev) {
394 u32 bitnum = smsc95xx_hash(mc_list->dmi_addr); 394 u32 bitnum = smsc95xx_hash(ha->addr);
395 u32 mask = 0x01 << (bitnum & 0x1F); 395 u32 mask = 0x01 << (bitnum & 0x1F);
396 if (bitnum & 0x20) 396 if (bitnum & 0x20)
397 hash_hi |= mask; 397 hash_hi |= mask;
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 7177abc78dc6..a95c73de5824 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1069,12 +1069,15 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1069 * NOTE: strictly conforming cdc-ether devices should expect 1069 * NOTE: strictly conforming cdc-ether devices should expect
1070 * the ZLP here, but ignore the one-byte packet. 1070 * the ZLP here, but ignore the one-byte packet.
1071 */ 1071 */
1072 if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) { 1072 if (length % dev->maxpacket == 0) {
1073 urb->transfer_buffer_length++; 1073 if (!(info->flags & FLAG_SEND_ZLP)) {
1074 if (skb_tailroom(skb)) { 1074 urb->transfer_buffer_length++;
1075 skb->data[skb->len] = 0; 1075 if (skb_tailroom(skb)) {
1076 __skb_put(skb, 1); 1076 skb->data[skb->len] = 0;
1077 } 1077 __skb_put(skb, 1);
1078 }
1079 } else
1080 urb->transfer_flags |= URB_ZERO_PACKET;
1078 } 1081 }
1079 1082
1080 spin_lock_irqsave(&dev->txq.lock, flags); 1083 spin_lock_irqsave(&dev->txq.lock, flags);
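Reflowing the new side of the usbnet hunk with comments makes the intent easier to follow: a transfer whose length is an exact multiple of the endpoint's max packet size needs an explicit terminator, and the code now chooses between a one-byte pad (for hardware that cannot send a zero-length packet) and asking the host controller for a real ZLP. All names are from the hunk itself:

        if (length % dev->maxpacket == 0) {
                if (!(info->flags & FLAG_SEND_ZLP)) {
                        /* device can't emit a ZLP: terminate the transfer
                         * with a single zero pad byte instead */
                        urb->transfer_buffer_length++;
                        if (skb_tailroom(skb)) {
                                skb->data[skb->len] = 0;
                                __skb_put(skb, 1);
                        }
                } else {
                        /* let the host controller append a zero-length packet */
                        urb->transfer_flags |= URB_ZERO_PACKET;
                }
        }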
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 388751aa66e0..467bcff13cd0 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1703,11 +1703,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
1703 iowrite32(0xffffffff, ioaddr + MulticastFilter1); 1703 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1704 rx_mode = 0x0C; 1704 rx_mode = 0x0C;
1705 } else { 1705 } else {
1706 struct dev_mc_list *mclist; 1706 struct netdev_hw_addr *ha;
1707 1707
1708 memset(mc_filter, 0, sizeof(mc_filter)); 1708 memset(mc_filter, 0, sizeof(mc_filter));
1709 netdev_for_each_mc_addr(mclist, dev) { 1709 netdev_for_each_mc_addr(ha, dev) {
1710 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 1710 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1711 1711
1712 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1712 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1713 } 1713 }
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index bc278d4ee89d..616f8c92b745 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -719,30 +719,30 @@ static u32 mii_check_media_mode(struct mac_regs __iomem *regs)
719 u32 status = 0; 719 u32 status = 0;
720 u16 ANAR; 720 u16 ANAR;
721 721
722 if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs)) 722 if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
723 status |= VELOCITY_LINK_FAIL; 723 status |= VELOCITY_LINK_FAIL;
724 724
725 if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs)) 725 if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
726 status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL; 726 status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
727 else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs)) 727 else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
728 status |= (VELOCITY_SPEED_1000); 728 status |= (VELOCITY_SPEED_1000);
729 else { 729 else {
730 velocity_mii_read(regs, MII_REG_ANAR, &ANAR); 730 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
731 if (ANAR & ANAR_TXFD) 731 if (ANAR & ADVERTISE_100FULL)
732 status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL); 732 status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
733 else if (ANAR & ANAR_TX) 733 else if (ANAR & ADVERTISE_100HALF)
734 status |= VELOCITY_SPEED_100; 734 status |= VELOCITY_SPEED_100;
735 else if (ANAR & ANAR_10FD) 735 else if (ANAR & ADVERTISE_10FULL)
736 status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL); 736 status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
737 else 737 else
738 status |= (VELOCITY_SPEED_10); 738 status |= (VELOCITY_SPEED_10);
739 } 739 }
740 740
741 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { 741 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
742 velocity_mii_read(regs, MII_REG_ANAR, &ANAR); 742 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
743 if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) 743 if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
744 == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { 744 == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
745 if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) 745 if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
746 status |= VELOCITY_AUTONEG_ENABLE; 746 status |= VELOCITY_AUTONEG_ENABLE;
747 } 747 }
748 } 748 }
@@ -801,23 +801,23 @@ static void set_mii_flow_control(struct velocity_info *vptr)
801 /*Enable or Disable PAUSE in ANAR */ 801 /*Enable or Disable PAUSE in ANAR */
802 switch (vptr->options.flow_cntl) { 802 switch (vptr->options.flow_cntl) {
803 case FLOW_CNTL_TX: 803 case FLOW_CNTL_TX:
804 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 804 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
805 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); 805 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
806 break; 806 break;
807 807
808 case FLOW_CNTL_RX: 808 case FLOW_CNTL_RX:
809 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 809 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
810 MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); 810 MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
811 break; 811 break;
812 812
813 case FLOW_CNTL_TX_RX: 813 case FLOW_CNTL_TX_RX:
814 MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 814 MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
815 MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); 815 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
816 break; 816 break;
817 817
818 case FLOW_CNTL_DISABLE: 818 case FLOW_CNTL_DISABLE:
819 MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs); 819 MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
820 MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs); 820 MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
821 break; 821 break;
822 default: 822 default:
823 break; 823 break;
@@ -832,10 +832,10 @@ static void set_mii_flow_control(struct velocity_info *vptr)
832 */ 832 */
833static void mii_set_auto_on(struct velocity_info *vptr) 833static void mii_set_auto_on(struct velocity_info *vptr)
834{ 834{
835 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs)) 835 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
836 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); 836 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
837 else 837 else
838 MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); 838 MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
839} 839}
840 840
841static u32 check_connection_type(struct mac_regs __iomem *regs) 841static u32 check_connection_type(struct mac_regs __iomem *regs)
@@ -860,11 +860,11 @@ static u32 check_connection_type(struct mac_regs __iomem *regs)
860 else 860 else
861 status |= VELOCITY_SPEED_100; 861 status |= VELOCITY_SPEED_100;
862 862
863 if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) { 863 if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
864 velocity_mii_read(regs, MII_REG_ANAR, &ANAR); 864 velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
865 if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) 865 if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
866 == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) { 866 == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
867 if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs)) 867 if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
868 status |= VELOCITY_AUTONEG_ENABLE; 868 status |= VELOCITY_AUTONEG_ENABLE;
869 } 869 }
870 } 870 }
@@ -905,7 +905,7 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
905 */ 905 */
906 906
907 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) 907 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
908 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); 908 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
909 909
910 /* 910 /*
911 * If connection type is AUTO 911 * If connection type is AUTO
@@ -915,9 +915,9 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
915 /* clear force MAC mode bit */ 915 /* clear force MAC mode bit */
916 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR); 916 BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
917 /* set duplex mode of MAC according to duplex mode of MII */ 917 /* set duplex mode of MAC according to duplex mode of MII */
918 MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs); 918 MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
919 MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); 919 MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
920 MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); 920 MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
921 921
922 /* enable AUTO-NEGO mode */ 922 /* enable AUTO-NEGO mode */
923 mii_set_auto_on(vptr); 923 mii_set_auto_on(vptr);
@@ -952,31 +952,31 @@ static int velocity_set_media_mode(struct velocity_info *vptr, u32 mii_status)
952 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR); 952 BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
953 } 953 }
954 954
955 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); 955 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
956 956
957 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10)) 957 if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
958 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG); 958 BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
959 else 959 else
960 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG); 960 BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
961 961
962 /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */ 962 /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
963 velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR); 963 velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
964 ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)); 964 ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
965 if (mii_status & VELOCITY_SPEED_100) { 965 if (mii_status & VELOCITY_SPEED_100) {
966 if (mii_status & VELOCITY_DUPLEX_FULL) 966 if (mii_status & VELOCITY_DUPLEX_FULL)
967 ANAR |= ANAR_TXFD; 967 ANAR |= ADVERTISE_100FULL;
968 else 968 else
969 ANAR |= ANAR_TX; 969 ANAR |= ADVERTISE_100HALF;
970 } else { 970 } else {
971 if (mii_status & VELOCITY_DUPLEX_FULL) 971 if (mii_status & VELOCITY_DUPLEX_FULL)
972 ANAR |= ANAR_10FD; 972 ANAR |= ADVERTISE_10FULL;
973 else 973 else
974 ANAR |= ANAR_10; 974 ANAR |= ADVERTISE_10HALF;
975 } 975 }
976 velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR); 976 velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
977 /* enable AUTO-NEGO mode */ 977 /* enable AUTO-NEGO mode */
978 mii_set_auto_on(vptr); 978 mii_set_auto_on(vptr);
979 /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */ 979 /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
980 } 980 }
981 /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */ 981 /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
982 /* vptr->mii_status=check_connection_type(vptr->mac_regs); */ 982 /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
@@ -1126,7 +1126,7 @@ static void velocity_set_multi(struct net_device *dev)
1126 struct mac_regs __iomem *regs = vptr->mac_regs; 1126 struct mac_regs __iomem *regs = vptr->mac_regs;
1127 u8 rx_mode; 1127 u8 rx_mode;
1128 int i; 1128 int i;
1129 struct dev_mc_list *mclist; 1129 struct netdev_hw_addr *ha;
1130 1130
1131 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1131 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1132 writel(0xffffffff, &regs->MARCAM[0]); 1132 writel(0xffffffff, &regs->MARCAM[0]);
@@ -1142,8 +1142,8 @@ static void velocity_set_multi(struct net_device *dev)
1142 mac_get_cam_mask(regs, vptr->mCAMmask); 1142 mac_get_cam_mask(regs, vptr->mCAMmask);
1143 1143
1144 i = 0; 1144 i = 0;
1145 netdev_for_each_mc_addr(mclist, dev) { 1145 netdev_for_each_mc_addr(ha, dev) {
1146 mac_set_cam(regs, i + offset, mclist->dmi_addr); 1146 mac_set_cam(regs, i + offset, ha->addr);
1147 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7); 1147 vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
1148 i++; 1148 i++;
1149 } 1149 }
@@ -1178,36 +1178,36 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
1178 /* 1178 /*
1179 * Reset to hardware default 1179 * Reset to hardware default
1180 */ 1180 */
1181 MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); 1181 MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1182 /* 1182 /*
1183 * Turn on ECHODIS bit in NWay-forced full mode and turn it 1183 * Turn on ECHODIS bit in NWay-forced full mode and turn it
1184 * off in NWay-forced half mode, for the NWay-forced vs. 1184 * off in NWay-forced half mode, for the NWay-forced vs.
1185 * legacy-forced issue. 1185 * legacy-forced issue.
1186 */ 1186 */
1187 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) 1187 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1188 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); 1188 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1189 else 1189 else
1190 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); 1190 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1191 /* 1191 /*
1192 * Turn on Link/Activity LED enable bit for CIS8201 1192 * Turn on Link/Activity LED enable bit for CIS8201
1193 */ 1193 */
1194 MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs); 1194 MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
1195 break; 1195 break;
1196 case PHYID_VT3216_32BIT: 1196 case PHYID_VT3216_32BIT:
1197 case PHYID_VT3216_64BIT: 1197 case PHYID_VT3216_64BIT:
1198 /* 1198 /*
1199 * Reset to hardware default 1199 * Reset to hardware default
1200 */ 1200 */
1201 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); 1201 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1202 /* 1202 /*
1203 * Turn on ECHODIS bit in NWay-forced full mode and turn it 1203 * Turn on ECHODIS bit in NWay-forced full mode and turn it
1204 * off in NWay-forced half mode, for the NWay-forced vs. 1204 * off in NWay-forced half mode, for the NWay-forced vs.
1205 * legacy-forced issue 1205 * legacy-forced issue
1206 */ 1206 */
1207 if (vptr->mii_status & VELOCITY_DUPLEX_FULL) 1207 if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
1208 MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); 1208 MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1209 else 1209 else
1210 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs); 1210 MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
1211 break; 1211 break;
1212 1212
1213 case PHYID_MARVELL_1000: 1213 case PHYID_MARVELL_1000:
@@ -1219,15 +1219,15 @@ static void mii_init(struct velocity_info *vptr, u32 mii_status)
1219 /* 1219 /*
1220 * Reset to hardware default 1220 * Reset to hardware default
1221 */ 1221 */
1222 MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs); 1222 MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
1223 break; 1223 break;
1224 default: 1224 default:
1225 ; 1225 ;
1226 } 1226 }
1227 velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR); 1227 velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
1228 if (BMCR & BMCR_ISO) { 1228 if (BMCR & BMCR_ISOLATE) {
1229 BMCR &= ~BMCR_ISO; 1229 BMCR &= ~BMCR_ISOLATE;
1230 velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR); 1230 velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
1231 } 1231 }
1232} 1232}
1233 1233
@@ -2953,13 +2953,13 @@ static int velocity_set_wol(struct velocity_info *vptr)
2953 2953
2954 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) { 2954 if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
2955 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) 2955 if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
2956 MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs); 2956 MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
2957 2957
2958 MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs); 2958 MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
2959 } 2959 }
2960 2960
2961 if (vptr->mii_status & VELOCITY_SPEED_1000) 2961 if (vptr->mii_status & VELOCITY_SPEED_1000)
2962 MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs); 2962 MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
2963 2963
2964 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR); 2964 BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
2965 2965
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index ef4a0f64ba16..c38191179fae 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1240,86 +1240,16 @@ struct velocity_context {
1240 u32 pattern[8]; 1240 u32 pattern[8];
1241}; 1241};
1242 1242
1243
1244/*
1245 * MII registers.
1246 */
1247
1248
1249/* 1243/*
1250 * Registers in the MII (offset unit is WORD) 1244 * Registers in the MII (offset unit is WORD)
1251 */ 1245 */
1252 1246
1253#define MII_REG_BMCR 0x00 // physical address
1254#define MII_REG_BMSR 0x01 //
1255#define MII_REG_PHYID1 0x02 // OUI
1256#define MII_REG_PHYID2 0x03 // OUI + Module ID + REV ID
1257#define MII_REG_ANAR 0x04 //
1258#define MII_REG_ANLPAR 0x05 //
1259#define MII_REG_G1000CR 0x09 //
1260#define MII_REG_G1000SR 0x0A //
1261#define MII_REG_MODCFG 0x10 //
1262#define MII_REG_TCSR 0x16 //
1263#define MII_REG_PLED 0x1B //
1264// NS, MYSON only
1265#define MII_REG_PCR 0x17 //
1266// ESI only
1267#define MII_REG_PCSR 0x17 //
1268#define MII_REG_AUXCR 0x1C //
1269
1270// Marvell 88E1000/88E1000S 1247// Marvell 88E1000/88E1000S
1271#define MII_REG_PSCR 0x10 // PHY specific control register 1248#define MII_REG_PSCR 0x10 // PHY specific control register
1272 1249
1273// 1250//
1274// Bits in the BMCR register 1251// Bits in the Silicon revision register
1275//
1276#define BMCR_RESET 0x8000 //
1277#define BMCR_LBK 0x4000 //
1278#define BMCR_SPEED100 0x2000 //
1279#define BMCR_AUTO 0x1000 //
1280#define BMCR_PD 0x0800 //
1281#define BMCR_ISO 0x0400 //
1282#define BMCR_REAUTO 0x0200 //
1283#define BMCR_FDX 0x0100 //
1284#define BMCR_SPEED1G 0x0040 //
1285//
1286// Bits in the BMSR register
1287//
1288#define BMSR_AUTOCM 0x0020 //
1289#define BMSR_LNK 0x0004 //
1290
1291//
1292// Bits in the ANAR register
1293//
1294#define ANAR_ASMDIR 0x0800 // Asymmetric PAUSE support
1295#define ANAR_PAUSE 0x0400 // Symmetric PAUSE Support
1296#define ANAR_T4 0x0200 //
1297#define ANAR_TXFD 0x0100 //
1298#define ANAR_TX 0x0080 //
1299#define ANAR_10FD 0x0040 //
1300#define ANAR_10 0x0020 //
1301//
1302// Bits in the ANLPAR register
1303//
1304#define ANLPAR_ASMDIR 0x0800 // Asymmetric PAUSE support
1305#define ANLPAR_PAUSE 0x0400 // Symmetric PAUSE Support
1306#define ANLPAR_T4 0x0200 //
1307#define ANLPAR_TXFD 0x0100 //
1308#define ANLPAR_TX 0x0080 //
1309#define ANLPAR_10FD 0x0040 //
1310#define ANLPAR_10 0x0020 //
1311
1312//
1313// Bits in the G1000CR register
1314//
1315#define G1000CR_1000FD 0x0200 // PHY is 1000-T Full-duplex capable
1316#define G1000CR_1000 0x0100 // PHY is 1000-T Half-duplex capable
1317
1318//
1319// Bits in the G1000SR register
1320// 1252//
1321#define G1000SR_1000FD 0x0800 // LP PHY is 1000-T Full-duplex capable
1322#define G1000SR_1000 0x0400 // LP PHY is 1000-T Half-duplex capable
1323 1253
1324#define TCSR_ECHODIS 0x2000 // 1254#define TCSR_ECHODIS 0x2000 //
1325#define AUXCR_MDPPS 0x0004 // 1255#define AUXCR_MDPPS 0x0004 //
@@ -1338,7 +1268,6 @@ struct velocity_context {
1338 1268
1339#define PHYID_REV_ID_MASK 0x0000000FUL 1269#define PHYID_REV_ID_MASK 0x0000000FUL
1340 1270
1341#define PHYID_GET_PHY_REV_ID(i) ((i) & PHYID_REV_ID_MASK)
1342#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK) 1271#define PHYID_GET_PHY_ID(i) ((i) & ~PHYID_REV_ID_MASK)
1343 1272
1344#define MII_REG_BITS_ON(x,i,p) do {\ 1273#define MII_REG_BITS_ON(x,i,p) do {\
@@ -1362,8 +1291,8 @@ struct velocity_context {
1362 1291
1363#define MII_GET_PHY_ID(p) ({\ 1292#define MII_GET_PHY_ID(p) ({\
1364 u32 id;\ 1293 u32 id;\
1365 velocity_mii_read((p),MII_REG_PHYID2,(u16 *) &id);\ 1294 velocity_mii_read((p),MII_PHYSID2,(u16 *) &id);\
1366 velocity_mii_read((p),MII_REG_PHYID1,((u16 *) &id)+1);\ 1295 velocity_mii_read((p),MII_PHYSID1,((u16 *) &id)+1);\
1367 (id);}) 1296 (id);})
1368 1297
1369/* 1298/*
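The via-velocity changes above are a pure rename: the driver-private MII register and bit definitions deleted from via-velocity.h are replaced by the generic ones in <linux/mii.h>. The correspondence, as applied throughout the .c hunks (offsets shown are the standard MII ones):

        #include <linux/mii.h>

        /* MII_REG_BMCR   (0x00) -> MII_BMCR        BMCR_AUTO    -> BMCR_ANENABLE
         * MII_REG_BMSR   (0x01) -> MII_BMSR        BMCR_REAUTO  -> BMCR_ANRESTART
         * MII_REG_PHYID1 (0x02) -> MII_PHYSID1     BMCR_SPEED1G -> BMCR_SPEED1000
         * MII_REG_PHYID2 (0x03) -> MII_PHYSID2     BMCR_ISO     -> BMCR_ISOLATE
         * MII_REG_ANAR   (0x04) -> MII_ADVERTISE   BMSR_LNK     -> BMSR_LSTATUS
         * MII_REG_G1000CR(0x09) -> MII_CTRL1000
         * ANAR_TXFD / ANAR_TX      -> ADVERTISE_100FULL / ADVERTISE_100HALF
         * ANAR_10FD / ANAR_10      -> ADVERTISE_10FULL  / ADVERTISE_10HALF
         * ANAR_PAUSE / ANAR_ASMDIR -> ADVERTISE_PAUSE_CAP / ADVERTISE_PAUSE_ASYM
         * G1000CR_1000FD / G1000CR_1000 -> ADVERTISE_1000FULL / ADVERTISE_1000HALF */

The vendor-specific TCSR, PLED and AUXCR registers keep their bit definitions but are now addressed through the generic names that share their offsets: MII_SREVISION (0x16), MII_TPISTATUS (0x1b) and MII_NCONFIG (0x1c).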
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b0577dd1a42d..b0a85d038796 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -40,8 +40,7 @@ module_param(gso, bool, 0444);
40 40
41#define VIRTNET_SEND_COMMAND_SG_MAX 2 41#define VIRTNET_SEND_COMMAND_SG_MAX 2
42 42
43struct virtnet_info 43struct virtnet_info {
44{
45 struct virtio_device *vdev; 44 struct virtio_device *vdev;
46 struct virtqueue *rvq, *svq, *cvq; 45 struct virtqueue *rvq, *svq, *cvq;
47 struct net_device *dev; 46 struct net_device *dev;
@@ -62,6 +61,10 @@ struct virtnet_info
62 61
63 /* Chain pages by the private ptr. */ 62 /* Chain pages by the private ptr. */
64 struct page *pages; 63 struct page *pages;
64
65 /* fragments + linear part + virtio header */
66 struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
67 struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
65}; 68};
66 69
67struct skb_vnet_hdr { 70struct skb_vnet_hdr {
@@ -324,10 +327,8 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
324{ 327{
325 struct sk_buff *skb; 328 struct sk_buff *skb;
326 struct skb_vnet_hdr *hdr; 329 struct skb_vnet_hdr *hdr;
327 struct scatterlist sg[2];
328 int err; 330 int err;
329 331
330 sg_init_table(sg, 2);
331 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN); 332 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
332 if (unlikely(!skb)) 333 if (unlikely(!skb))
333 return -ENOMEM; 334 return -ENOMEM;
@@ -335,11 +336,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
335 skb_put(skb, MAX_PACKET_LEN); 336 skb_put(skb, MAX_PACKET_LEN);
336 337
337 hdr = skb_vnet_hdr(skb); 338 hdr = skb_vnet_hdr(skb);
338 sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr); 339 sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
339 340
340 skb_to_sgvec(skb, sg + 1, 0, skb->len); 341 skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
341 342
342 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb); 343 err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
343 if (err < 0) 344 if (err < 0)
344 dev_kfree_skb(skb); 345 dev_kfree_skb(skb);
345 346
@@ -348,13 +349,11 @@ static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
348 349
349static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp) 350static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
350{ 351{
351 struct scatterlist sg[MAX_SKB_FRAGS + 2];
352 struct page *first, *list = NULL; 352 struct page *first, *list = NULL;
353 char *p; 353 char *p;
354 int i, err, offset; 354 int i, err, offset;
355 355
356 sg_init_table(sg, MAX_SKB_FRAGS + 2); 356 /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
357 /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
358 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) { 357 for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
359 first = get_a_page(vi, gfp); 358 first = get_a_page(vi, gfp);
360 if (!first) { 359 if (!first) {
@@ -362,7 +361,7 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
362 give_pages(vi, list); 361 give_pages(vi, list);
363 return -ENOMEM; 362 return -ENOMEM;
364 } 363 }
365 sg_set_buf(&sg[i], page_address(first), PAGE_SIZE); 364 sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);
366 365
367 /* chain new page in list head to match sg */ 366 /* chain new page in list head to match sg */
368 first->private = (unsigned long)list; 367 first->private = (unsigned long)list;
@@ -376,17 +375,17 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
376 } 375 }
377 p = page_address(first); 376 p = page_address(first);
378 377
379 /* sg[0], sg[1] share the same page */ 378 /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
380 /* a separated sg[0] for virtio_net_hdr only during to QEMU bug*/ 379 /* a separated vi->rx_sg[0] for virtio_net_hdr only due to QEMU bug */
381 sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr)); 380 sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
382 381
383 /* sg[1] for data packet, from offset */ 382 /* vi->rx_sg[1] for data packet, from offset */
384 offset = sizeof(struct padded_vnet_hdr); 383 offset = sizeof(struct padded_vnet_hdr);
385 sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset); 384 sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);
386 385
387 /* chain first in list head */ 386 /* chain first in list head */
388 first->private = (unsigned long)list; 387 first->private = (unsigned long)list;
389 err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2, 388 err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
390 first); 389 first);
391 if (err < 0) 390 if (err < 0)
392 give_pages(vi, first); 391 give_pages(vi, first);
@@ -397,16 +396,15 @@ static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
397static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp) 396static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
398{ 397{
399 struct page *page; 398 struct page *page;
400 struct scatterlist sg;
401 int err; 399 int err;
402 400
403 page = get_a_page(vi, gfp); 401 page = get_a_page(vi, gfp);
404 if (!page) 402 if (!page)
405 return -ENOMEM; 403 return -ENOMEM;
406 404
407 sg_init_one(&sg, page_address(page), PAGE_SIZE); 405 sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
408 406
409 err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page); 407 err = vi->rvq->vq_ops->add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
410 if (err < 0) 408 if (err < 0)
411 give_pages(vi, page); 409 give_pages(vi, page);
412 410
@@ -515,12 +513,9 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
515 513
516static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb) 514static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
517{ 515{
518 struct scatterlist sg[2+MAX_SKB_FRAGS];
519 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb); 516 struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
520 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; 517 const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
521 518
522 sg_init_table(sg, 2+MAX_SKB_FRAGS);
523
524 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest); 519 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
525 520
526 if (skb->ip_summed == CHECKSUM_PARTIAL) { 521 if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -554,12 +549,13 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
554 549
555 /* Encode metadata header at front. */ 550 /* Encode metadata header at front. */
556 if (vi->mergeable_rx_bufs) 551 if (vi->mergeable_rx_bufs)
557 sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr); 552 sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
558 else 553 else
559 sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr); 554 sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
560 555
561 hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1; 556 hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
562 return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb); 557 return vi->svq->vq_ops->add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
558 0, skb);
563} 559}
564 560
565static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) 561static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -722,7 +718,6 @@ static void virtnet_set_rx_mode(struct net_device *dev)
722 struct scatterlist sg[2]; 718 struct scatterlist sg[2];
723 u8 promisc, allmulti; 719 u8 promisc, allmulti;
724 struct virtio_net_ctrl_mac *mac_data; 720 struct virtio_net_ctrl_mac *mac_data;
725 struct dev_addr_list *addr;
726 struct netdev_hw_addr *ha; 721 struct netdev_hw_addr *ha;
727 int uc_count; 722 int uc_count;
728 int mc_count; 723 int mc_count;
@@ -779,8 +774,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
779 774
780 mac_data->entries = mc_count; 775 mac_data->entries = mc_count;
781 i = 0; 776 i = 0;
782 netdev_for_each_mc_addr(addr, dev) 777 netdev_for_each_mc_addr(ha, dev)
783 memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN); 778 memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
784 779
785 sg_set_buf(&sg[1], mac_data, 780 sg_set_buf(&sg[1], mac_data,
786 sizeof(mac_data->entries) + (mc_count * ETH_ALEN)); 781 sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
@@ -942,6 +937,8 @@ static int virtnet_probe(struct virtio_device *vdev)
942 vdev->priv = vi; 937 vdev->priv = vi;
943 vi->pages = NULL; 938 vi->pages = NULL;
944 INIT_DELAYED_WORK(&vi->refill, refill_work); 939 INIT_DELAYED_WORK(&vi->refill, refill_work);
940 sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
941 sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
945 942
946 /* If we can receive ANY GSO packets, we must allocate large ones. */ 943 /* If we can receive ANY GSO packets, we must allocate large ones. */
947 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 944 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
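The virtio_net change trades per-call stack scatterlists for two arrays embedded in virtnet_info, initialized exactly once at probe time; the hot paths then only rewrite the entries they actually use. A minimal sketch of the pattern under the same sizing assumption (header + linear part + MAX_SB_FRAGS fragments), with illustrative struct and function names:

        #include <linux/kernel.h>
        #include <linux/scatterlist.h>
        #include <linux/skbuff.h>

        struct example_ctx {
                /* fragments + linear part + virtio header */
                struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
        };

        static void example_init(struct example_ctx *c)
        {
                /* once, at probe: set up the end/chain markers */
                sg_init_table(c->rx_sg, ARRAY_SIZE(c->rx_sg));
        }

        static void example_queue(struct example_ctx *c, void *hdr, size_t hlen)
        {
                /* per buffer: overwrite only the slots in use, then hand
                 * rx_sg to the ring with an explicit entry count */
                sg_set_buf(&c->rx_sg[0], hdr, hlen);
        }

This works because add_buf() takes explicit out/in counts, so the unused tail of the preinitialized table is simply never looked at.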
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index cff3485d9673..90e783a09245 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1174,7 +1174,6 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1174 netif_receive_skb(skb); 1174 netif_receive_skb(skb);
1175 } 1175 }
1176 1176
1177 adapter->netdev->last_rx = jiffies;
1178 ctx->skb = NULL; 1177 ctx->skb = NULL;
1179 } 1178 }
1180 1179
@@ -1675,11 +1674,11 @@ vmxnet3_copy_mc(struct net_device *netdev)
1675 /* We may be called with BH disabled */ 1674 /* We may be called with BH disabled */
1676 buf = kmalloc(sz, GFP_ATOMIC); 1675 buf = kmalloc(sz, GFP_ATOMIC);
1677 if (buf) { 1676 if (buf) {
1678 struct dev_mc_list *mc; 1677 struct netdev_hw_addr *ha;
1679 int i = 0; 1678 int i = 0;
1680 1679
1681 netdev_for_each_mc_addr(mc, netdev) 1680 netdev_for_each_mc_addr(ha, netdev)
1682 memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr, 1681 memcpy(buf + i++ * ETH_ALEN, ha->addr,
1683 ETH_ALEN); 1682 ETH_ALEN);
1684 } 1683 }
1685 } 1684 }
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index a21a25d218b6..a5fc8166c01d 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -357,8 +357,10 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
357 357
358 switch (host_type) { 358 switch (host_type) {
359 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION: 359 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
360 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | 360 if (func_id == 0) {
361 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM; 361 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
362 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
363 }
362 break; 364 break;
363 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION: 365 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
364 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | 366 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
@@ -633,8 +635,10 @@ vxge_hw_device_initialize(
633 __vxge_hw_device_pci_e_init(hldev); 635 __vxge_hw_device_pci_e_init(hldev);
634 636
635 status = __vxge_hw_device_reg_addr_get(hldev); 637 status = __vxge_hw_device_reg_addr_get(hldev);
636 if (status != VXGE_HW_OK) 638 if (status != VXGE_HW_OK) {
639 vfree(hldev);
637 goto exit; 640 goto exit;
641 }
638 __vxge_hw_device_id_get(hldev); 642 __vxge_hw_device_id_get(hldev);
639 643
640 __vxge_hw_device_host_info_get(hldev); 644 __vxge_hw_device_host_info_get(hldev);
@@ -1218,14 +1222,13 @@ __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
1218} 1222}
1219 1223
1220/* 1224/*
1221 * __vxge_hw_ring_initial_replenish - Initial replenish of RxDs 1225 * __vxge_hw_ring_replenish - Initial replenish of RxDs
1222 * This function replenishes the RxDs from reserve array to work array 1226 * This function replenishes the RxDs from reserve array to work array
1223 */ 1227 */
1224enum vxge_hw_status 1228enum vxge_hw_status
1225vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag) 1229vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
1226{ 1230{
1227 void *rxd; 1231 void *rxd;
1228 int i = 0;
1229 struct __vxge_hw_channel *channel; 1232 struct __vxge_hw_channel *channel;
1230 enum vxge_hw_status status = VXGE_HW_OK; 1233 enum vxge_hw_status status = VXGE_HW_OK;
1231 1234
@@ -1246,11 +1249,6 @@ vxge_hw_ring_replenish(struct __vxge_hw_ring *ring, u16 min_flag)
1246 } 1249 }
1247 1250
1248 vxge_hw_ring_rxd_post(ring, rxd); 1251 vxge_hw_ring_rxd_post(ring, rxd);
1249 if (min_flag) {
1250 i++;
1251 if (i == VXGE_HW_RING_MIN_BUFF_ALLOCATION)
1252 break;
1253 }
1254 } 1252 }
1255 status = VXGE_HW_OK; 1253 status = VXGE_HW_OK;
1256exit: 1254exit:
@@ -1355,7 +1353,7 @@ __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1355 * Currently we don't have a case when the 1) is done without the 2). 1353 * Currently we don't have a case when the 1) is done without the 2).
1356 */ 1354 */
1357 if (ring->rxd_init) { 1355 if (ring->rxd_init) {
1358 status = vxge_hw_ring_replenish(ring, 1); 1356 status = vxge_hw_ring_replenish(ring);
1359 if (status != VXGE_HW_OK) { 1357 if (status != VXGE_HW_OK) {
1360 __vxge_hw_ring_delete(vp); 1358 __vxge_hw_ring_delete(vp);
1361 goto exit; 1359 goto exit;
@@ -1417,7 +1415,7 @@ enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1417 goto exit; 1415 goto exit;
1418 1416
1419 if (ring->rxd_init) { 1417 if (ring->rxd_init) {
1420 status = vxge_hw_ring_replenish(ring, 1); 1418 status = vxge_hw_ring_replenish(ring);
1421 if (status != VXGE_HW_OK) 1419 if (status != VXGE_HW_OK)
1422 goto exit; 1420 goto exit;
1423 } 1421 }
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 13f5416307f8..4ae2625d4d8f 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -765,10 +765,18 @@ struct vxge_hw_device_hw_info {
765#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6 765#define VXGE_HW_SR_VH_VIRTUAL_FUNCTION 6
766#define VXGE_HW_VH_NORMAL_FUNCTION 7 766#define VXGE_HW_VH_NORMAL_FUNCTION 7
767 u64 function_mode; 767 u64 function_mode;
768#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 0 768#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 0
769#define VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION 1 769#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION 1
770#define VXGE_HW_FUNCTION_MODE_SRIOV 2 770#define VXGE_HW_FUNCTION_MODE_SRIOV 2
771#define VXGE_HW_FUNCTION_MODE_MRIOV 3 771#define VXGE_HW_FUNCTION_MODE_MRIOV 3
772#define VXGE_HW_FUNCTION_MODE_MRIOV_8 4
773#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17 5
774#define VXGE_HW_FUNCTION_MODE_SRIOV_8 6
775#define VXGE_HW_FUNCTION_MODE_SRIOV_4 7
776#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2 8
777#define VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_4 9
778#define VXGE_HW_FUNCTION_MODE_MRIOV_4 10
779
772 u32 func_id; 780 u32 func_id;
773 u64 vpath_mask; 781 u64 vpath_mask;
774 struct vxge_hw_device_version fw_version; 782 struct vxge_hw_device_version fw_version;
@@ -1915,20 +1923,32 @@ static inline void *vxge_os_dma_malloc(struct pci_dev *pdev,
1915 gfp_t flags; 1923 gfp_t flags;
1916 void *vaddr; 1924 void *vaddr;
1917 unsigned long misaligned = 0; 1925 unsigned long misaligned = 0;
1926 int realloc_flag = 0;
1918 *p_dma_acch = *p_dmah = NULL; 1927 *p_dma_acch = *p_dmah = NULL;
1919 1928
1920 if (in_interrupt()) 1929 if (in_interrupt())
1921 flags = GFP_ATOMIC | GFP_DMA; 1930 flags = GFP_ATOMIC | GFP_DMA;
1922 else 1931 else
1923 flags = GFP_KERNEL | GFP_DMA; 1932 flags = GFP_KERNEL | GFP_DMA;
1924 1933realloc:
1925 size += VXGE_CACHE_LINE_SIZE;
1926
1927 vaddr = kmalloc((size), flags); 1934 vaddr = kmalloc((size), flags);
1928 if (vaddr == NULL) 1935 if (vaddr == NULL)
1929 return vaddr; 1936 return vaddr;
1930 misaligned = (unsigned long)VXGE_ALIGN(*((u64 *)&vaddr), 1937 misaligned = (unsigned long)VXGE_ALIGN((unsigned long)vaddr,
1931 VXGE_CACHE_LINE_SIZE); 1938 VXGE_CACHE_LINE_SIZE);
1939 if (realloc_flag)
1940 goto out;
1941
1942 if (misaligned) {
1943 /* misaligned, free current one and try allocating
1944 * size + VXGE_CACHE_LINE_SIZE memory
1945 */
1946 kfree((void *) vaddr);
1947 size += VXGE_CACHE_LINE_SIZE;
1948 realloc_flag = 1;
1949 goto realloc;
1950 }
1951out:
1932 *(unsigned long *)p_dma_acch = misaligned; 1952 *(unsigned long *)p_dma_acch = misaligned;
1933 vaddr = (void *)((u8 *)vaddr + misaligned); 1953 vaddr = (void *)((u8 *)vaddr + misaligned);
1934 return vaddr; 1954 return vaddr;
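The vxge_os_dma_malloc() rework above stops unconditionally padding every request by a cache line: it allocates the exact size first and retries with size + VXGE_CACHE_LINE_SIZE only when the returned pointer is actually misaligned. A condensed sketch of the same idea; ALIGN_PAD() is an illustrative stand-in for the driver's VXGE_ALIGN() offset macro, and the caller is assumed to keep the pad so it can kfree() the original pointer:

        #include <linux/slab.h>

        /* bytes to add to p to reach the next a-byte boundary (a: power of 2) */
        #define ALIGN_PAD(p, a) (((a) - ((unsigned long)(p) & ((a) - 1))) & ((a) - 1))

        static void *example_aligned_kmalloc(size_t size, gfp_t flags,
                                             unsigned long align,
                                             unsigned long *pad_out)
        {
                void *vaddr = kmalloc(size, flags);
                unsigned long pad;

                if (!vaddr)
                        return NULL;
                pad = ALIGN_PAD(vaddr, align);
                if (pad) {
                        /* misaligned: pay the extra cache line only now */
                        kfree(vaddr);
                        vaddr = kmalloc(size + align, flags);
                        if (!vaddr)
                                return NULL;
                        pad = ALIGN_PAD(vaddr, align);
                }
                *pad_out = pad;         /* kfree((u8 *)ptr - pad) to release */
                return (u8 *)vaddr + pad;
        }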
@@ -2254,4 +2274,6 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2254 struct vxge_hw_rth_hash_types *hash_type, 2274 struct vxge_hw_rth_hash_types *hash_type,
2255 u16 bucket_size); 2275 u16 bucket_size);
2256 2276
2277enum vxge_hw_status
2278__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
2257#endif 2279#endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index aaf374cfd322..cadef8549c06 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -109,7 +109,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
109 int index, offset; 109 int index, offset;
110 enum vxge_hw_status status; 110 enum vxge_hw_status status;
111 u64 reg; 111 u64 reg;
112 u8 *reg_space = (u8 *) space; 112 u64 *reg_space = (u64 *) space;
113 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 113 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
114 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 114 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *)
115 pci_get_drvdata(vdev->pdev); 115 pci_get_drvdata(vdev->pdev);
@@ -129,8 +129,7 @@ static void vxge_ethtool_gregs(struct net_device *dev,
129 __func__, __LINE__); 129 __func__, __LINE__);
130 return; 130 return;
131 } 131 }
132 132 *reg_space++ = reg;
133 memcpy((reg_space + offset), &reg, 8);
134 } 133 }
135 } 134 }
136} 135}
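The vxge-ethtool hunk swaps a byte-offset memcpy() for an incrementing u64 pointer, which also makes explicit that the ethtool register dump is a flat array of 8-byte values. A sketch under that reading; regs[] and n_regs are illustrative stand-ins for the per-vpath register reads the real function performs:

        static void example_dump_regs(void *space, const u64 *regs,
                                      unsigned int n_regs)
        {
                u64 *out = (u64 *)space;        /* ethtool regs buffer */
                unsigned int i;

                for (i = 0; i < n_regs; i++)
                        *out++ = regs[i];  /* was: memcpy(space + offset, &reg, 8) */
        }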
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index ba6d0da78c30..2bab36421f71 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -445,7 +445,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
445 ring->ndev->name, __func__, __LINE__); 445 ring->ndev->name, __func__, __LINE__);
446 ring->pkts_processed = 0; 446 ring->pkts_processed = 0;
447 447
448 vxge_hw_ring_replenish(ringh, 0); 448 vxge_hw_ring_replenish(ringh);
449 449
450 do { 450 do {
451 prefetch((char *)dtr + L1_CACHE_BYTES); 451 prefetch((char *)dtr + L1_CACHE_BYTES);
@@ -1118,7 +1118,7 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1118 */ 1118 */
1119static void vxge_set_multicast(struct net_device *dev) 1119static void vxge_set_multicast(struct net_device *dev)
1120{ 1120{
1121 struct dev_mc_list *mclist; 1121 struct netdev_hw_addr *ha;
1122 struct vxgedev *vdev; 1122 struct vxgedev *vdev;
1123 int i, mcast_cnt = 0; 1123 int i, mcast_cnt = 0;
1124 struct __vxge_hw_device *hldev; 1124 struct __vxge_hw_device *hldev;
@@ -1218,8 +1218,8 @@ static void vxge_set_multicast(struct net_device *dev)
1218 } 1218 }
1219 1219
1220 /* Add new ones */ 1220 /* Add new ones */
1221 netdev_for_each_mc_addr(mclist, dev) { 1221 netdev_for_each_mc_addr(ha, dev) {
1222 memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN); 1222 memcpy(mac_info.macaddr, ha->addr, ETH_ALEN);
1223 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath; 1223 for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
1224 vpath_idx++) { 1224 vpath_idx++) {
1225 mac_info.vpath_no = vpath_idx; 1225 mac_info.vpath_no = vpath_idx;
@@ -1364,28 +1364,26 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
1364void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id) 1364void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1365{ 1365{
1366 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1366 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1367 int msix_id, alarm_msix_id; 1367 int msix_id = 0;
1368 int tim_msix_id[4] = {[0 ...3] = 0}; 1368 int tim_msix_id[4] = {0, 1, 0, 0};
1369 int alarm_msix_id = VXGE_ALARM_MSIX_ID;
1369 1370
1370 vxge_hw_vpath_intr_enable(vpath->handle); 1371 vxge_hw_vpath_intr_enable(vpath->handle);
1371 1372
1372 if (vdev->config.intr_type == INTA) 1373 if (vdev->config.intr_type == INTA)
1373 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle); 1374 vxge_hw_vpath_inta_unmask_tx_rx(vpath->handle);
1374 else { 1375 else {
1375 msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1376 alarm_msix_id =
1377 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
1378
1379 tim_msix_id[0] = msix_id;
1380 tim_msix_id[1] = msix_id + 1;
1381 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id, 1376 vxge_hw_vpath_msix_set(vpath->handle, tim_msix_id,
1382 alarm_msix_id); 1377 alarm_msix_id);
1383 1378
1379 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1384 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id); 1380 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1385 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1); 1381 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id + 1);
1386 1382
1387 /* enable the alarm vector */ 1383 /* enable the alarm vector */
1388 vxge_hw_vpath_msix_unmask(vpath->handle, alarm_msix_id); 1384 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1385 VXGE_HW_VPATH_MSIX_ACTIVE) + alarm_msix_id;
1386 vxge_hw_vpath_msix_unmask(vpath->handle, msix_id);
1389 } 1387 }
1390} 1388}
1391 1389
@@ -1406,12 +1404,13 @@ void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1406 if (vdev->config.intr_type == INTA) 1404 if (vdev->config.intr_type == INTA)
1407 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle); 1405 vxge_hw_vpath_inta_mask_tx_rx(vpath->handle);
1408 else { 1406 else {
1409 msix_id = vp_id * VXGE_HW_VPATH_MSIX_ACTIVE; 1407 msix_id = vpath->device_id * VXGE_HW_VPATH_MSIX_ACTIVE;
1410 vxge_hw_vpath_msix_mask(vpath->handle, msix_id); 1408 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1411 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1); 1409 vxge_hw_vpath_msix_mask(vpath->handle, msix_id + 1);
1412 1410
1413 /* disable the alarm vector */ 1411 /* disable the alarm vector */
1414 msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; 1412 msix_id = (vpath->handle->vpath->hldev->first_vp_id *
1413 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
1415 vxge_hw_vpath_msix_mask(vpath->handle, msix_id); 1414 vxge_hw_vpath_msix_mask(vpath->handle, msix_id);
1416 } 1415 }
1417} 1416}
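Both intr_enable and intr_disable now derive MSI-X vector numbers from a fixed per-vpath block rather than the old dense "last vector is the alarm" layout: each virtual path owns VXGE_HW_VPATH_MSIX_ACTIVE consecutive slots starting at device_id * VXGE_HW_VPATH_MSIX_ACTIVE, with Tx in slot 0, Rx in slot 1, and the alarm at offset VXGE_ALARM_MSIX_ID within the first vpath's block. As small helpers (illustrative names, arithmetic taken from the hunks):

        static inline int example_tx_vec(int device_id)
        {
                return device_id * VXGE_HW_VPATH_MSIX_ACTIVE;        /* slot 0 */
        }

        static inline int example_rx_vec(int device_id)
        {
                return device_id * VXGE_HW_VPATH_MSIX_ACTIVE + 1;    /* slot 1 */
        }

        static inline int example_alarm_vec(int first_vp_id)
        {
                return first_vp_id * VXGE_HW_VPATH_MSIX_ACTIVE +
                        VXGE_ALARM_MSIX_ID;
        }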
@@ -2224,19 +2223,18 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
2224 enum vxge_hw_status status; 2223 enum vxge_hw_status status;
2225 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id; 2224 struct vxge_vpath *vpath = (struct vxge_vpath *)dev_id;
2226 struct vxgedev *vdev = vpath->vdev; 2225 struct vxgedev *vdev = vpath->vdev;
2227 int alarm_msix_id = 2226 int msix_id = (vpath->handle->vpath->vp_id *
2228 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2; 2227 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2229 2228
2230 for (i = 0; i < vdev->no_of_vpath; i++) { 2229 for (i = 0; i < vdev->no_of_vpath; i++) {
2231 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, 2230 vxge_hw_vpath_msix_mask(vdev->vpaths[i].handle, msix_id);
2232 alarm_msix_id);
2233 2231
2234 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle, 2232 status = vxge_hw_vpath_alarm_process(vdev->vpaths[i].handle,
2235 vdev->exec_mode); 2233 vdev->exec_mode);
2236 if (status == VXGE_HW_OK) { 2234 if (status == VXGE_HW_OK) {
2237 2235
2238 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle, 2236 vxge_hw_vpath_msix_unmask(vdev->vpaths[i].handle,
2239 alarm_msix_id); 2237 msix_id);
2240 continue; 2238 continue;
2241 } 2239 }
2242 vxge_debug_intr(VXGE_ERR, 2240 vxge_debug_intr(VXGE_ERR,
@@ -2249,18 +2247,17 @@ vxge_alarm_msix_handle(int irq, void *dev_id)
2249static int vxge_alloc_msix(struct vxgedev *vdev) 2247static int vxge_alloc_msix(struct vxgedev *vdev)
2250{ 2248{
2251 int j, i, ret = 0; 2249 int j, i, ret = 0;
2252 int intr_cnt = 0; 2250 int msix_intr_vect = 0, temp;
2253 int alarm_msix_id = 0, msix_intr_vect = 0;
2254 vdev->intr_cnt = 0; 2251 vdev->intr_cnt = 0;
2255 2252
2253start:
2256 /* Tx/Rx MSIX Vectors count */ 2254 /* Tx/Rx MSIX Vectors count */
2257 vdev->intr_cnt = vdev->no_of_vpath * 2; 2255 vdev->intr_cnt = vdev->no_of_vpath * 2;
2258 2256
2259 /* Alarm MSIX Vectors count */ 2257 /* Alarm MSIX Vectors count */
2260 vdev->intr_cnt++; 2258 vdev->intr_cnt++;
2261 2259
2262 intr_cnt = (vdev->max_vpath_supported * 2) + 1; 2260 vdev->entries = kzalloc(vdev->intr_cnt * sizeof(struct msix_entry),
2263 vdev->entries = kzalloc(intr_cnt * sizeof(struct msix_entry),
2264 GFP_KERNEL); 2261 GFP_KERNEL);
2265 if (!vdev->entries) { 2262 if (!vdev->entries) {
2266 vxge_debug_init(VXGE_ERR, 2263 vxge_debug_init(VXGE_ERR,
@@ -2269,8 +2266,9 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
2269 return -ENOMEM; 2266 return -ENOMEM;
2270 } 2267 }
2271 2268
2272 vdev->vxge_entries = kzalloc(intr_cnt * sizeof(struct vxge_msix_entry), 2269 vdev->vxge_entries =
2273 GFP_KERNEL); 2270 kzalloc(vdev->intr_cnt * sizeof(struct vxge_msix_entry),
2271 GFP_KERNEL);
2274 if (!vdev->vxge_entries) { 2272 if (!vdev->vxge_entries) {
2275 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed", 2273 vxge_debug_init(VXGE_ERR, "%s: memory allocation failed",
2276 VXGE_DRIVER_NAME); 2274 VXGE_DRIVER_NAME);
@@ -2278,9 +2276,7 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
2278 return -ENOMEM; 2276 return -ENOMEM;
2279 } 2277 }
2280 2278
2281 /* Last vector in the list is used for alarm */ 2279 for (i = 0, j = 0; i < vdev->no_of_vpath; i++) {
2282 alarm_msix_id = VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2283 for (i = 0, j = 0; i < vdev->max_vpath_supported; i++) {
2284 2280
2285 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; 2281 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE;
2286 2282
@@ -2298,47 +2294,31 @@ static int vxge_alloc_msix(struct vxgedev *vdev)
2298 } 2294 }
2299 2295
2300 /* Initialize the alarm vector */ 2296 /* Initialize the alarm vector */
2301 vdev->entries[j].entry = alarm_msix_id; 2297 vdev->entries[j].entry = VXGE_ALARM_MSIX_ID;
2302 vdev->vxge_entries[j].entry = alarm_msix_id; 2298 vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
2303 vdev->vxge_entries[j].in_use = 0; 2299 vdev->vxge_entries[j].in_use = 0;
2304 2300
2305 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt); 2301 ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
2306 /* if driver request exceeeds available irq's, request with a small
2307 * number.
2308 */
2309 if (ret > 0) {
2310 vxge_debug_init(VXGE_ERR,
2311 "%s: MSI-X enable failed for %d vectors, available: %d",
2312 VXGE_DRIVER_NAME, intr_cnt, ret);
2313 vdev->max_vpath_supported = vdev->no_of_vpath;
2314 intr_cnt = (vdev->max_vpath_supported * 2) + 1;
2315
2316 /* Reset the alarm vector setting */
2317 vdev->entries[j].entry = 0;
2318 vdev->vxge_entries[j].entry = 0;
2319
2320 /* Initialize the alarm vector with new setting */
2321 vdev->entries[intr_cnt - 1].entry = alarm_msix_id;
2322 vdev->vxge_entries[intr_cnt - 1].entry = alarm_msix_id;
2323 vdev->vxge_entries[intr_cnt - 1].in_use = 0;
2324
2325 ret = pci_enable_msix(vdev->pdev, vdev->entries, intr_cnt);
2326 if (!ret)
2327 vxge_debug_init(VXGE_ERR,
2328 "%s: MSI-X enabled for %d vectors",
2329 VXGE_DRIVER_NAME, intr_cnt);
2330 }
2331 2302
2332 if (ret) { 2303 if (ret > 0) {
2333 vxge_debug_init(VXGE_ERR, 2304 vxge_debug_init(VXGE_ERR,
2334 "%s: MSI-X enable failed for %d vectors, ret: %d", 2305 "%s: MSI-X enable failed for %d vectors, ret: %d",
2335 VXGE_DRIVER_NAME, intr_cnt, ret); 2306 VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
2336 kfree(vdev->entries); 2307 kfree(vdev->entries);
2337 kfree(vdev->vxge_entries); 2308 kfree(vdev->vxge_entries);
2338 vdev->entries = NULL; 2309 vdev->entries = NULL;
2339 vdev->vxge_entries = NULL; 2310 vdev->vxge_entries = NULL;
2311
2312 if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3))
2313 return -ENODEV;
2315 /* Try with fewer vectors by reducing the vpath count */
2315 temp = (ret - 1)/2;
2316 vxge_close_vpaths(vdev, temp);
2317 vdev->no_of_vpath = temp;
2318 goto start;
2319 } else if (ret < 0)
2340 return -ENODEV; 2320 return -ENODEV;
2341 } 2321
2342 return 0; 2322 return 0;
2343} 2323}
2344 2324
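pci_enable_msix() of this era returned 0 on success, a negative errno on hard failure, and, when the request could not be met in full, the positive number of vectors actually available. The rewritten vxge_alloc_msix() exploits that: on a positive return it frees the entry arrays, shrinks to (ret - 1) / 2 vpaths (two Tx/Rx vectors per path plus one alarm vector) and jumps back to start. Condensed to its control flow, with the entry-array setup elided (the driver redoes it on every pass):

        static int example_enable_msix(struct pci_dev *pdev,
                                       struct msix_entry *entries, int *npaths)
        {
                for (;;) {
                        int nvec = *npaths * 2 + 1;   /* Tx+Rx per path, +alarm */
                        int ret = pci_enable_msix(pdev, entries, nvec);

                        if (ret == 0)
                                return 0;             /* all vectors granted */
                        if (ret < 3)
                                return -ENODEV;       /* error, or too few */
                        *npaths = (ret - 1) / 2;      /* retry, smaller */
                }
        }

Note the driver refuses to shrink at all when the user pinned the vpath count (max_config_vpath != VXGE_USE_DEFAULT), failing with -ENODEV instead.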
@@ -2346,43 +2326,26 @@ static int vxge_enable_msix(struct vxgedev *vdev)
2346{ 2326{
2347 2327
2348 int i, ret = 0; 2328 int i, ret = 0;
2349 enum vxge_hw_status status;
2350 /* 0 - Tx, 1 - Rx */ 2329 /* 0 - Tx, 1 - Rx */
2351 int tim_msix_id[4]; 2330 int tim_msix_id[4] = {0, 1, 0, 0};
2352 int alarm_msix_id = 0, msix_intr_vect = 0; 2331
2353 vdev->intr_cnt = 0; 2332 vdev->intr_cnt = 0;
2354 2333
2355 /* allocate msix vectors */ 2334 /* allocate msix vectors */
2356 ret = vxge_alloc_msix(vdev); 2335 ret = vxge_alloc_msix(vdev);
2357 if (!ret) { 2336 if (!ret) {
2358 /* Last vector in the list is used for alarm */
2359 alarm_msix_id =
2360 VXGE_HW_VPATH_MSIX_ACTIVE * vdev->no_of_vpath - 2;
2361 for (i = 0; i < vdev->no_of_vpath; i++) { 2337 for (i = 0; i < vdev->no_of_vpath; i++) {
2362 2338
2363 /* If fifo or ring are not enabled 2339 /* If fifo or ring are not enabled
2364 the MSIX vector for that should be set to 0 2340 the MSIX vector for that should be set to 0
2365 Hence initializing this array to all 0s. 2341 Hence initializing this array to all 0s.
2366 */ 2342 */
2367 memset(tim_msix_id, 0, sizeof(tim_msix_id)); 2343 vdev->vpaths[i].ring.rx_vector_no =
2368 msix_intr_vect = i * VXGE_HW_VPATH_MSIX_ACTIVE; 2344 (vdev->vpaths[i].device_id *
2369 tim_msix_id[0] = msix_intr_vect; 2345 VXGE_HW_VPATH_MSIX_ACTIVE) + 1;
2370 2346
2371 tim_msix_id[1] = msix_intr_vect + 1; 2347 vxge_hw_vpath_msix_set(vdev->vpaths[i].handle,
2372 vdev->vpaths[i].ring.rx_vector_no = tim_msix_id[1]; 2348 tim_msix_id, VXGE_ALARM_MSIX_ID);
2373
2374 status = vxge_hw_vpath_msix_set(
2375 vdev->vpaths[i].handle,
2376 tim_msix_id, alarm_msix_id);
2377 if (status != VXGE_HW_OK) {
2378 vxge_debug_init(VXGE_ERR,
2379 "vxge_hw_vpath_msix_set "
2380 "failed with status : %x", status);
2381 kfree(vdev->entries);
2382 kfree(vdev->vxge_entries);
2383 pci_disable_msix(vdev->pdev);
2384 return -ENODEV;
2385 }
2386 } 2349 }
2387 } 2350 }
2388 2351
@@ -2393,7 +2356,7 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
2393{ 2356{
2394 int intr_cnt; 2357 int intr_cnt;
2395 2358
2396 for (intr_cnt = 0; intr_cnt < (vdev->max_vpath_supported * 2 + 1); 2359 for (intr_cnt = 0; intr_cnt < (vdev->no_of_vpath * 2 + 1);
2397 intr_cnt++) { 2360 intr_cnt++) {
2398 if (vdev->vxge_entries[intr_cnt].in_use) { 2361 if (vdev->vxge_entries[intr_cnt].in_use) {
2399 synchronize_irq(vdev->entries[intr_cnt].vector); 2362 synchronize_irq(vdev->entries[intr_cnt].vector);
@@ -2458,9 +2421,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
2458 switch (msix_idx) { 2421 switch (msix_idx) {
2459 case 0: 2422 case 0:
2460 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2423 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2461 "%s:vxge fn: %d vpath: %d Tx MSI-X: %d", 2424 "%s:vxge:MSI-X %d - Tx - fn:%d vpath:%d",
2462 vdev->ndev->name, pci_fun, vp_idx, 2425 vdev->ndev->name,
2463 vdev->entries[intr_cnt].entry); 2426 vdev->entries[intr_cnt].entry,
2427 pci_fun, vp_idx);
2464 ret = request_irq( 2428 ret = request_irq(
2465 vdev->entries[intr_cnt].vector, 2429 vdev->entries[intr_cnt].vector,
2466 vxge_tx_msix_handle, 0, 2430 vxge_tx_msix_handle, 0,
@@ -2472,9 +2436,10 @@ static int vxge_add_isr(struct vxgedev *vdev)
2472 break; 2436 break;
2473 case 1: 2437 case 1:
2474 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2438 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2475 "%s:vxge fn: %d vpath: %d Rx MSI-X: %d", 2439 "%s:vxge:MSI-X %d - Rx - fn:%d vpath:%d",
2476 vdev->ndev->name, pci_fun, vp_idx, 2440 vdev->ndev->name,
2477 vdev->entries[intr_cnt].entry); 2441 vdev->entries[intr_cnt].entry,
2442 pci_fun, vp_idx);
2478 ret = request_irq( 2443 ret = request_irq(
2479 vdev->entries[intr_cnt].vector, 2444 vdev->entries[intr_cnt].vector,
2480 vxge_rx_msix_napi_handle, 2445 vxge_rx_msix_napi_handle,
@@ -2502,9 +2467,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
2502 if (irq_req) { 2467 if (irq_req) {
2503 /* We requested for this msix interrupt */ 2468 /* We requested for this msix interrupt */
2504 vdev->vxge_entries[intr_cnt].in_use = 1; 2469 vdev->vxge_entries[intr_cnt].in_use = 1;
2470 msix_idx += vdev->vpaths[vp_idx].device_id *
2471 VXGE_HW_VPATH_MSIX_ACTIVE;
2505 vxge_hw_vpath_msix_unmask( 2472 vxge_hw_vpath_msix_unmask(
2506 vdev->vpaths[vp_idx].handle, 2473 vdev->vpaths[vp_idx].handle,
2507 intr_idx); 2474 msix_idx);
2508 intr_cnt++; 2475 intr_cnt++;
2509 } 2476 }
2510 2477
@@ -2514,16 +2481,17 @@ static int vxge_add_isr(struct vxgedev *vdev)
2514 vp_idx++; 2481 vp_idx++;
2515 } 2482 }
2516 2483
2517 intr_cnt = vdev->max_vpath_supported * 2; 2484 intr_cnt = vdev->no_of_vpath * 2;
2518 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN, 2485 snprintf(vdev->desc[intr_cnt], VXGE_INTR_STRLEN,
2519 "%s:vxge Alarm fn: %d MSI-X: %d", 2486 "%s:vxge:MSI-X %d - Alarm - fn:%d",
2520 vdev->ndev->name, pci_fun, 2487 vdev->ndev->name,
2521 vdev->entries[intr_cnt].entry); 2488 vdev->entries[intr_cnt].entry,
2489 pci_fun);
2522 /* For Alarm interrupts */ 2490 /* For Alarm interrupts */
2523 ret = request_irq(vdev->entries[intr_cnt].vector, 2491 ret = request_irq(vdev->entries[intr_cnt].vector,
2524 vxge_alarm_msix_handle, 0, 2492 vxge_alarm_msix_handle, 0,
2525 vdev->desc[intr_cnt], 2493 vdev->desc[intr_cnt],
2526 &vdev->vpaths[vp_idx]); 2494 &vdev->vpaths[0]);
2527 if (ret) { 2495 if (ret) {
2528 vxge_debug_init(VXGE_ERR, 2496 vxge_debug_init(VXGE_ERR,
2529 "%s: MSIX - %d Registration failed", 2497 "%s: MSIX - %d Registration failed",
@@ -2536,16 +2504,19 @@ static int vxge_add_isr(struct vxgedev *vdev)
2536 goto INTA_MODE; 2504 goto INTA_MODE;
2537 } 2505 }
2538 2506
2507 msix_idx = (vdev->vpaths[0].handle->vpath->vp_id *
2508 VXGE_HW_VPATH_MSIX_ACTIVE) + VXGE_ALARM_MSIX_ID;
2539 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, 2509 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
2540 intr_idx - 2); 2510 msix_idx);
2541 vdev->vxge_entries[intr_cnt].in_use = 1; 2511 vdev->vxge_entries[intr_cnt].in_use = 1;
2542 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[vp_idx]; 2512 vdev->vxge_entries[intr_cnt].arg = &vdev->vpaths[0];
2543 } 2513 }
2544INTA_MODE: 2514INTA_MODE:
2545#endif 2515#endif
2546 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
2547 2516
2548 if (vdev->config.intr_type == INTA) { 2517 if (vdev->config.intr_type == INTA) {
2518 snprintf(vdev->desc[0], VXGE_INTR_STRLEN,
2519 "%s:vxge:INTA", vdev->ndev->name);
2549 vxge_hw_device_set_intr_type(vdev->devh, 2520 vxge_hw_device_set_intr_type(vdev->devh,
2550 VXGE_HW_INTR_MODE_IRQLINE); 2521 VXGE_HW_INTR_MODE_IRQLINE);
2551 vxge_hw_vpath_tti_ci_set(vdev->devh, 2522 vxge_hw_vpath_tti_ci_set(vdev->devh,
@@ -3995,6 +3966,36 @@ static void vxge_io_resume(struct pci_dev *pdev)
3995 netif_device_attach(netdev); 3966 netif_device_attach(netdev);
3996} 3967}
3997 3968
3969static inline u32 vxge_get_num_vfs(u64 function_mode)
3970{
3971 u32 num_functions = 0;
3972
3973 switch (function_mode) {
3974 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION:
3975 case VXGE_HW_FUNCTION_MODE_SRIOV_8:
3976 num_functions = 8;
3977 break;
3978 case VXGE_HW_FUNCTION_MODE_SINGLE_FUNCTION:
3979 num_functions = 1;
3980 break;
3981 case VXGE_HW_FUNCTION_MODE_SRIOV:
3982 case VXGE_HW_FUNCTION_MODE_MRIOV:
3983 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_17:
3984 num_functions = 17;
3985 break;
3986 case VXGE_HW_FUNCTION_MODE_SRIOV_4:
3987 num_functions = 4;
3988 break;
3989 case VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION_2:
3990 num_functions = 2;
3991 break;
3992 case VXGE_HW_FUNCTION_MODE_MRIOV_8:
3993 num_functions = 8; /* TODO */
3994 break;
3995 }
3996 return num_functions;
3997}
3998
3998/** 3999/**
3999 * vxge_probe 4000 * vxge_probe
4000 * @pdev : structure containing the PCI related information of the device. 4001 * @pdev : structure containing the PCI related information of the device.
@@ -4022,14 +4023,19 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4022 u8 *macaddr; 4023 u8 *macaddr;
4023 struct vxge_mac_addrs *entry; 4024 struct vxge_mac_addrs *entry;
4024 static int bus = -1, device = -1; 4025 static int bus = -1, device = -1;
4026 u32 host_type;
4025 u8 new_device = 0; 4027 u8 new_device = 0;
4028 enum vxge_hw_status is_privileged;
4029 u32 function_mode;
4030 u32 num_vfs = 0;
4026 4031
4027 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 4032 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
4028 attr.pdev = pdev; 4033 attr.pdev = pdev;
4029 4034
4030 if (bus != pdev->bus->number) 4035 /* In SRIOV-17 mode, functions of the same adapter
4031 new_device = 1; 4036 * can be deployed on different buses */
4032 if (device != PCI_SLOT(pdev->devfn)) 4037 if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
4038 (device != PCI_SLOT(pdev->devfn))))
4033 new_device = 1; 4039 new_device = 1;
4034 4040
4035 bus = pdev->bus->number; 4041 bus = pdev->bus->number;
@@ -4046,9 +4052,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4046 driver_config->total_dev_cnt); 4052 driver_config->total_dev_cnt);
4047 driver_config->config_dev_cnt = 0; 4053 driver_config->config_dev_cnt = 0;
4048 driver_config->total_dev_cnt = 0; 4054 driver_config->total_dev_cnt = 0;
4049 driver_config->g_no_cpus = 0;
4050 } 4055 }
4051		4056	/* Now making the CPU-based number-of-vpaths calculation
		4057	 * applicable to individual functions as well.
4058 */
4059 driver_config->g_no_cpus = 0;
4052 driver_config->vpath_per_dev = max_config_vpath; 4060 driver_config->vpath_per_dev = max_config_vpath;
4053 4061
4054 driver_config->total_dev_cnt++; 4062 driver_config->total_dev_cnt++;
@@ -4161,6 +4169,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4161 "%s:%d Vpath mask = %llx", __func__, __LINE__, 4169 "%s:%d Vpath mask = %llx", __func__, __LINE__,
4162 (unsigned long long)vpath_mask); 4170 (unsigned long long)vpath_mask);
4163 4171
4172 function_mode = ll_config.device_hw_info.function_mode;
4173 host_type = ll_config.device_hw_info.host_type;
4174 is_privileged = __vxge_hw_device_is_privilaged(host_type,
4175 ll_config.device_hw_info.func_id);
4176
4164 /* Check how many vpaths are available */ 4177 /* Check how many vpaths are available */
4165 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 4178 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
4166 if (!((vpath_mask) & vxge_mBIT(i))) 4179 if (!((vpath_mask) & vxge_mBIT(i)))
@@ -4168,14 +4181,18 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4168 max_vpath_supported++; 4181 max_vpath_supported++;
4169 } 4182 }
4170 4183
4184 if (new_device)
4185 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4186
4171 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ 4187 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4172 if ((VXGE_HW_FUNCTION_MODE_SRIOV == 4188 if (is_sriov(function_mode) && (max_config_dev > 1) &&
4173 ll_config.device_hw_info.function_mode) && 4189 (ll_config.intr_type != INTA) &&
4174 (max_config_dev > 1) && (pdev->is_physfn)) { 4190 (is_privileged == VXGE_HW_OK)) {
4175 ret = pci_enable_sriov(pdev, max_config_dev - 1); 4191 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
4176 if (ret) 4192 ? (max_config_dev - 1) : num_vfs);
4177 vxge_debug_ll_config(VXGE_ERR, 4193 if (ret)
4178 "Failed to enable SRIOV: %d \n", ret); 4194 vxge_debug_ll_config(VXGE_ERR,
4195 "Failed in enabling SRIOV mode: %d\n", ret);
4179 } 4196 }
4180 4197
4181 /* 4198 /*
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 7c83ba4be9d7..60276b20fa5e 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -31,6 +31,7 @@
31#define PCI_DEVICE_ID_TITAN_UNI 0x5833 31#define PCI_DEVICE_ID_TITAN_UNI 0x5833
32#define VXGE_USE_DEFAULT 0xffffffff 32#define VXGE_USE_DEFAULT 0xffffffff
33#define VXGE_HW_VPATH_MSIX_ACTIVE 4 33#define VXGE_HW_VPATH_MSIX_ACTIVE 4
34#define VXGE_ALARM_MSIX_ID 2
34#define VXGE_HW_RXSYNC_FREQ_CNT 4 35#define VXGE_HW_RXSYNC_FREQ_CNT 4
35#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ) 36#define VXGE_LL_WATCH_DOG_TIMEOUT (15 * HZ)
36#define VXGE_LL_RX_COPY_THRESHOLD 256 37#define VXGE_LL_RX_COPY_THRESHOLD 256
@@ -89,6 +90,11 @@
89 90
90#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE) 91#define VXGE_LL_MAX_FRAME_SIZE(dev) ((dev)->mtu + VXGE_HW_MAC_HEADER_MAX_SIZE)
91 92
93#define is_sriov(function_mode) \
94 ((function_mode == VXGE_HW_FUNCTION_MODE_SRIOV) || \
95 (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_8) || \
96 (function_mode == VXGE_HW_FUNCTION_MODE_SRIOV_4))
97
92enum vxge_reset_event { 98enum vxge_reset_event {
93 /* reset events */ 99 /* reset events */
94 VXGE_LL_VPATH_RESET = 0, 100 VXGE_LL_VPATH_RESET = 0,
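Together with vxge_get_num_vfs() from vxge-main.c above, the is_sriov() macro gates SR-IOV enablement in vxge_probe(). A condensed sketch of the VF-count decision, under the assumption that one function is kept as the PF; the helper name is hypothetical:

/* Sketch: how many VFs vxge_probe() asks pci_enable_sriov() for. */
static u32 vxge_sriov_vfs_to_request(u32 function_mode, int max_config_dev)
{
	u32 num_vfs;

	if (!is_sriov(function_mode) || max_config_dev <= 1)
		return 0;
	num_vfs = vxge_get_num_vfs(function_mode) - 1;	/* one PF, rest VFs */
	return min_t(u32, max_config_dev - 1, num_vfs);	/* cap by module limit */
}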
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 2c012f4ce465..f83e6aee3f6a 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -231,8 +231,7 @@ void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
231{ 231{
232 232
233 __vxge_hw_pio_mem_write32_upper( 233 __vxge_hw_pio_mem_write32_upper(
234 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)), 234 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
235 0, 32),
236 &channel->common_reg->set_msix_mask_vect[msix_id%4]); 235 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
237 236
238 return; 237 return;
@@ -252,8 +251,7 @@ vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
252{ 251{
253 252
254 __vxge_hw_pio_mem_write32_upper( 253 __vxge_hw_pio_mem_write32_upper(
255 (u32)vxge_bVALn(vxge_mBIT(channel->first_vp_id+(msix_id/4)), 254 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
256 0, 32),
257 &channel->common_reg->clear_msix_mask_vect[msix_id%4]); 255 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
258 256
259 return; 257 return;
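The masking hunks above drop the first_vp_id offset and address the mask registers purely from the vector number. A sketch of the mapping, assuming four {set,clear}_msix_mask_vect words with vxge_mBIT() numbering bits from the most significant end (the helper is illustrative):

/* Sketch: where MSI-X vector N lives in the mask registers. */
static inline void msix_vec_to_mask_reg(int msix_id, int *word, int *bit)
{
	*word = msix_id % 4;	/* which mask_vect[] register */
	*bit  = msix_id >> 2;	/* which vxge_mBIT() position inside it */
}
/* e.g. vector 13 -> word 1, bit 3; vector 2 (an alarm slot) -> word 2, bit 0 */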
@@ -878,7 +876,7 @@ void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
878 876
879 channel = &ring->channel; 877 channel = &ring->channel;
880 878
881 rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; 879 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
882 880
883 if (ring->stats->common_stats.usage_cnt > 0) 881 if (ring->stats->common_stats.usage_cnt > 0)
884 ring->stats->common_stats.usage_cnt--; 882 ring->stats->common_stats.usage_cnt--;
@@ -902,7 +900,7 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
902 channel = &ring->channel; 900 channel = &ring->channel;
903 901
904 wmb(); 902 wmb();
905 rxdp->control_0 |= VXGE_HW_RING_RXD_LIST_OWN_ADAPTER; 903 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
906 904
907 vxge_hw_channel_dtr_post(channel, rxdh); 905 vxge_hw_channel_dtr_post(channel, rxdh);
908 906
@@ -966,6 +964,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
966 struct __vxge_hw_channel *channel; 964 struct __vxge_hw_channel *channel;
967 struct vxge_hw_ring_rxd_1 *rxdp; 965 struct vxge_hw_ring_rxd_1 *rxdp;
968 enum vxge_hw_status status = VXGE_HW_OK; 966 enum vxge_hw_status status = VXGE_HW_OK;
967 u64 control_0, own;
969 968
970 channel = &ring->channel; 969 channel = &ring->channel;
971 970
@@ -977,8 +976,12 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
977 goto exit; 976 goto exit;
978 } 977 }
979 978
979 control_0 = rxdp->control_0;
980 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
981 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
982
980 /* check whether it is not the end */ 983 /* check whether it is not the end */
981 if (!(rxdp->control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER)) { 984 if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
982 985
983 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != 986 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
984 0); 987 0);
@@ -986,8 +989,6 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
986 ++ring->cmpl_cnt; 989 ++ring->cmpl_cnt;
987 vxge_hw_channel_dtr_complete(channel); 990 vxge_hw_channel_dtr_complete(channel);
988 991
989 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(rxdp->control_0);
990
991 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED); 992 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
992 993
993 ring->stats->common_stats.usage_cnt++; 994 ring->stats->common_stats.usage_cnt++;
@@ -1035,12 +1036,13 @@ enum vxge_hw_status vxge_hw_ring_handle_tcode(
1035	 * such as unknown IPv6 header), Drop it !!!			1036
1036 */ 1037 */
1037 1038
1038 if (t_code == 0 || t_code == 5) { 1039 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1040 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1039 status = VXGE_HW_OK; 1041 status = VXGE_HW_OK;
1040 goto exit; 1042 goto exit;
1041 } 1043 }
1042 1044
1043 if (t_code > 0xF) { 1045 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1044 status = VXGE_HW_ERR_INVALID_TCODE; 1046 status = VXGE_HW_ERR_INVALID_TCODE;
1045 goto exit; 1047 goto exit;
1046 } 1048 }
@@ -2216,29 +2218,24 @@ exit:
2216 * This API will associate a given MSIX vector numbers with the four TIM 2218 * This API will associate a given MSIX vector numbers with the four TIM
2217 * interrupts and alarm interrupt. 2219 * interrupts and alarm interrupt.
2218 */ 2220 */
2219enum vxge_hw_status 2221void
2220vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id, 2222vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2221 int alarm_msix_id) 2223 int alarm_msix_id)
2222{ 2224{
2223 u64 val64; 2225 u64 val64;
2224 struct __vxge_hw_virtualpath *vpath = vp->vpath; 2226 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2225 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg; 2227 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2226 u32 first_vp_id = vpath->hldev->first_vp_id; 2228 u32 vp_id = vp->vpath->vp_id;
2227 2229
2228 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI( 2230 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2229 (first_vp_id * 4) + tim_msix_id[0]) | 2231 (vp_id * 4) + tim_msix_id[0]) |
2230 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI( 2232 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2231 (first_vp_id * 4) + tim_msix_id[1]) | 2233 (vp_id * 4) + tim_msix_id[1]);
2232 VXGE_HW_INTERRUPT_CFG0_GROUP2_MSIX_FOR_TXTI(
2233 (first_vp_id * 4) + tim_msix_id[2]);
2234
2235 val64 |= VXGE_HW_INTERRUPT_CFG0_GROUP3_MSIX_FOR_TXTI(
2236 (first_vp_id * 4) + tim_msix_id[3]);
2237 2234
2238 writeq(val64, &vp_reg->interrupt_cfg0); 2235 writeq(val64, &vp_reg->interrupt_cfg0);
2239 2236
2240 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG( 2237 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2241 (first_vp_id * 4) + alarm_msix_id), 2238 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2242 &vp_reg->interrupt_cfg2); 2239 &vp_reg->interrupt_cfg2);
2243 2240
2244 if (vpath->hldev->config.intr_mode == 2241 if (vpath->hldev->config.intr_mode ==
@@ -2259,7 +2256,7 @@ vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2259 0, 32), &vp_reg->one_shot_vect3_en); 2256 0, 32), &vp_reg->one_shot_vect3_en);
2260 } 2257 }
2261 2258
2262 return VXGE_HW_OK; 2259 return;
2263} 2260}
2264 2261
2265/** 2262/**
@@ -2279,8 +2276,7 @@ vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2279{ 2276{
2280 struct __vxge_hw_device *hldev = vp->vpath->hldev; 2277 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2281 __vxge_hw_pio_mem_write32_upper( 2278 __vxge_hw_pio_mem_write32_upper(
2282 (u32) vxge_bVALn(vxge_mBIT(hldev->first_vp_id + 2279 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2283 (msix_id / 4)), 0, 32),
2284 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]); 2280 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2285 2281
2286 return; 2282 return;
@@ -2305,14 +2301,12 @@ vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2305 if (hldev->config.intr_mode == 2301 if (hldev->config.intr_mode ==
2306 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) { 2302 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2307 __vxge_hw_pio_mem_write32_upper( 2303 __vxge_hw_pio_mem_write32_upper(
2308 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id + 2304 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2309 (msix_id/4)), 0, 32),
2310 &hldev->common_reg-> 2305 &hldev->common_reg->
2311 clr_msix_one_shot_vec[msix_id%4]); 2306 clr_msix_one_shot_vec[msix_id%4]);
2312 } else { 2307 } else {
2313 __vxge_hw_pio_mem_write32_upper( 2308 __vxge_hw_pio_mem_write32_upper(
2314 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id + 2309 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2315 (msix_id/4)), 0, 32),
2316 &hldev->common_reg-> 2310 &hldev->common_reg->
2317 clear_msix_mask_vect[msix_id%4]); 2311 clear_msix_mask_vect[msix_id%4]);
2318 } 2312 }
@@ -2337,8 +2331,7 @@ vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2337{ 2331{
2338 struct __vxge_hw_device *hldev = vp->vpath->hldev; 2332 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2339 __vxge_hw_pio_mem_write32_upper( 2333 __vxge_hw_pio_mem_write32_upper(
2340 (u32)vxge_bVALn(vxge_mBIT(hldev->first_vp_id + 2334 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2341 (msix_id/4)), 0, 32),
2342 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]); 2335 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2343 2336
2344 return; 2337 return;
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 861c853e3e84..c252f3d3f650 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1866,6 +1866,51 @@ struct vxge_hw_ring_rxd_info {
1866 u32 rth_hash_type; 1866 u32 rth_hash_type;
1867 u32 rth_value; 1867 u32 rth_value;
1868}; 1868};
1869/**
1870 * enum vxge_hw_ring_tcode - Transfer codes returned by adapter
1871 * @VXGE_HW_RING_T_CODE_OK: Transfer ok.
1872 * @VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH: Layer 3 checksum presentation
1873 * configuration mismatch.
1874 * @VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH: Layer 4 checksum presentation
1875 * configuration mismatch.
1876 * @VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH: Layer 3 and Layer 4 checksum
1877 * presentation configuration mismatch.
1878 * @VXGE_HW_RING_T_CODE_L3_PKT_ERR: Layer 3 error - unparseable packet,
1879 *		such as an unknown IPv6 header.
1880 * @VXGE_HW_RING_T_CODE_L2_FRM_ERR: Layer 2 error - frame integrity
1881 *		error, such as FCS or ECC.
1882 * @VXGE_HW_RING_T_CODE_BUF_SIZE_ERR: Buffer size error - the RxD
1883 *		buffer(s) were not appropriately sized and data loss occurred.
1884 * @VXGE_HW_RING_T_CODE_INT_ECC_ERR: Internal ECC error - RxD corrupted.
1885 * @VXGE_HW_RING_T_CODE_BENIGN_OVFLOW: Benign overflow - the contents of
1886 *		Segment1 exceeded the capacity of Buffer1 and the remainder
1887 *		was placed in Buffer2. Segment2 now starts in Buffer3.
1888 *		No data loss or errors occurred.
1889 * @VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF: Buffer size 0 - one of the RxD's
1890 *		assigned buffers has a size of 0 bytes.
1891 * @VXGE_HW_RING_T_CODE_FRM_DROP: Frame dropped - either due to
1892 *		VPath Reset or because of a VPIN mismatch.
1893 * @VXGE_HW_RING_T_CODE_UNUSED: Unused
1894 * @VXGE_HW_RING_T_CODE_MULTI_ERR: Multiple errors - more than one
1895 *		transfer code condition occurred.
1896 *
1897 * Transfer codes returned by adapter.
1898 */
1899enum vxge_hw_ring_tcode {
1900 VXGE_HW_RING_T_CODE_OK = 0x0,
1901 VXGE_HW_RING_T_CODE_L3_CKSUM_MISMATCH = 0x1,
1902 VXGE_HW_RING_T_CODE_L4_CKSUM_MISMATCH = 0x2,
1903 VXGE_HW_RING_T_CODE_L3_L4_CKSUM_MISMATCH = 0x3,
1904 VXGE_HW_RING_T_CODE_L3_PKT_ERR = 0x5,
1905 VXGE_HW_RING_T_CODE_L2_FRM_ERR = 0x6,
1906 VXGE_HW_RING_T_CODE_BUF_SIZE_ERR = 0x7,
1907 VXGE_HW_RING_T_CODE_INT_ECC_ERR = 0x8,
1908 VXGE_HW_RING_T_CODE_BENIGN_OVFLOW = 0x9,
1909 VXGE_HW_RING_T_CODE_ZERO_LEN_BUFF = 0xA,
1910 VXGE_HW_RING_T_CODE_FRM_DROP = 0xC,
1911 VXGE_HW_RING_T_CODE_UNUSED = 0xE,
1912 VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
1913};
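These names replace the bare 0, 0x5 and 0xF constants in vxge_hw_ring_handle_tcode() above. A simplified sketch of that classification; the hunk does not show what the full function does for the remaining codes, so the VXGE_HW_FAIL fallback here is an assumption:

static enum vxge_hw_status classify_tcode(u8 t_code)
{
	if (t_code == VXGE_HW_RING_T_CODE_OK ||
	    t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR)
		return VXGE_HW_OK;			/* frame still usable */
	if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR)
		return VXGE_HW_ERR_INVALID_TCODE;	/* outside the 4-bit range */
	return VXGE_HW_FAIL;				/* genuine receive error */
}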
1869 1914
1870/** 1915/**
1871 * enum enum vxge_hw_ring_hash_type - RTH hash types 1916 * enum enum vxge_hw_ring_hash_type - RTH hash types
@@ -1910,7 +1955,7 @@ vxge_hw_ring_rxd_post_post(
1910 void *rxdh); 1955 void *rxdh);
1911 1956
1912enum vxge_hw_status 1957enum vxge_hw_status
1913vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle, u16 min_flag); 1958vxge_hw_ring_replenish(struct __vxge_hw_ring *ring_handle);
1914 1959
1915void 1960void
1916vxge_hw_ring_rxd_post_post_wmb( 1961vxge_hw_ring_rxd_post_post_wmb(
@@ -2042,7 +2087,6 @@ void vxge_hw_fifo_txdl_free(
2042 2087
2043#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8) 2088#define VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET (VXGE_HW_BLOCK_SIZE-8)
2044#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16) 2089#define VXGE_HW_RING_MEMBLOCK_IDX_OFFSET (VXGE_HW_BLOCK_SIZE-16)
2045#define VXGE_HW_RING_MIN_BUFF_ALLOCATION 64
2046 2090
2047/* 2091/*
2048 * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data. 2092 * struct __vxge_hw_ring_rxd_priv - Receive descriptor HW-private data.
@@ -2332,7 +2376,7 @@ enum vxge_hw_status vxge_hw_vpath_alarm_process(
2332 struct __vxge_hw_vpath_handle *vpath_handle, 2376 struct __vxge_hw_vpath_handle *vpath_handle,
2333 u32 skip_alarms); 2377 u32 skip_alarms);
2334 2378
2335enum vxge_hw_status 2379void
2336vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle, 2380vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vpath_handle,
2337 int *tim_msix_id, int alarm_msix_id); 2381 int *tim_msix_id, int alarm_msix_id);
2338 2382
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 77c2a754b7b8..5da7ab1fd307 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
17 17
18#define VXGE_VERSION_MAJOR "2" 18#define VXGE_VERSION_MAJOR "2"
19#define VXGE_VERSION_MINOR "0" 19#define VXGE_VERSION_MINOR "0"
20#define VXGE_VERSION_FIX "6" 20#define VXGE_VERSION_FIX "8"
21#define VXGE_VERSION_BUILD "18937" 21#define VXGE_VERSION_BUILD "20182"
22#define VXGE_VERSION_FOR "k" 22#define VXGE_VERSION_FOR "k"
23#endif 23#endif
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index 3f744c643094..17502d80b65a 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -396,7 +396,7 @@ static void tx1_dma_buf_check(pc300_t * card, int ch)
396 u16 next_bd = card->chan[ch].tx_next_bd; 396 u16 next_bd = card->chan[ch].tx_next_bd;
397 u32 scabase = card->hw.scabase; 397 u32 scabase = card->hw.scabase;
398 398
399 printk ("\nnfree_tx_bd = %d \n", card->chan[ch].nfree_tx_bd); 399 printk ("\nnfree_tx_bd = %d\n", card->chan[ch].nfree_tx_bd);
400 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch, 400 printk("#CH%d: f_bd = %d(0x%08x), n_bd = %d(0x%08x)\n", ch,
401 first_bd, TX_BD_ADDR(ch, first_bd), 401 first_bd, TX_BD_ADDR(ch, first_bd),
402 next_bd, TX_BD_ADDR(ch, next_bd)); 402 next_bd, TX_BD_ADDR(ch, next_bd));
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c
index 4917a94943bd..4293889e287e 100644
--- a/drivers/net/wan/pc300_tty.c
+++ b/drivers/net/wan/pc300_tty.c
@@ -366,7 +366,7 @@ static void cpc_tty_close(struct tty_struct *tty, struct file *flip)
366 int res; 366 int res;
367 367
368 if (!tty || !tty->driver_data ) { 368 if (!tty || !tty->driver_data ) {
369 CPC_TTY_DBG("hdlx-tty: no TTY in close \n"); 369 CPC_TTY_DBG("hdlx-tty: no TTY in close\n");
370 return; 370 return;
371 } 371 }
372 372
diff --git a/drivers/net/wan/sdla.c b/drivers/net/wan/sdla.c
index 31c41af2246d..43ae6f440bfb 100644
--- a/drivers/net/wan/sdla.c
+++ b/drivers/net/wan/sdla.c
@@ -1352,7 +1352,7 @@ static int sdla_set_config(struct net_device *dev, struct ifmap *map)
1352 return(-EINVAL); 1352 return(-EINVAL);
1353 1353
1354 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){ 1354 if (!request_region(map->base_addr, SDLA_IO_EXTENTS, dev->name)){
1355 printk(KERN_WARNING "SDLA: io-port 0x%04lx in use \n", dev->base_addr); 1355 printk(KERN_WARNING "SDLA: io-port 0x%04lx in use\n", dev->base_addr);
1356 return(-EINVAL); 1356 return(-EINVAL);
1357 } 1357 }
1358 base = map->base_addr; 1358 base = map->base_addr;
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index b0cb90624cf6..6db909ecf1c9 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -689,7 +689,7 @@ try_new:
689 pl_type, buf_len); 689 pl_type, buf_len);
690 tx_msg->num_pls = le16_to_cpu(num_pls+1); 690 tx_msg->num_pls = le16_to_cpu(num_pls+1);
691 tx_msg->size += padded_len; 691 tx_msg->size += padded_len;
692 d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u \n", 692 d_printf(2, dev, "TX: appended %zu b (up to %u b) pl #%u\n",
693 padded_len, tx_msg->size, num_pls+1); 693 padded_len, tx_msg->size, num_pls+1);
694 d_printf(2, dev, 694 d_printf(2, dev,
695 "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n", 695 "TX: appended hdr @%zu %zu b pl #%u @%zu %zu/%zu b\n",
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index ab61d2b558d6..880ad9d170c2 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -1318,21 +1318,19 @@ static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
1318} 1318}
1319 1319
1320static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw, 1320static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
1321 int mc_count, struct dev_addr_list *mclist) 1321 struct netdev_hw_addr_list *mc_list)
1322{ 1322{
1323 unsigned int bit_nr, i; 1323 unsigned int bit_nr;
1324 u32 mc_filter[2]; 1324 u32 mc_filter[2];
1325 struct netdev_hw_addr *ha;
1325 1326
1326 mc_filter[1] = mc_filter[0] = 0; 1327 mc_filter[1] = mc_filter[0] = 0;
1327 1328
1328 for (i = 0; i < mc_count; i++) { 1329 netdev_hw_addr_list_for_each(ha, mc_list) {
1329 if (!mclist) 1330 bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1330 break;
1331 bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1332 1331
1333 bit_nr &= 0x3F; 1332 bit_nr &= 0x3F;
1334 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); 1333 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1335 mclist = mclist->next;
1336 } 1334 }
1337 1335
1338 return mc_filter[0] | ((u64)(mc_filter[1]) << 32); 1336 return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
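The hash filter itself is unchanged by the conversion above; only the list walk is. As a worked example of the indexing, assuming an address whose ether_crc() top six bits come out to 37:

u32 mc_filter[2] = { 0, 0 };
unsigned int bit_nr = 37;			/* ether_crc(...) >> 26, say */

mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);	/* 37 >> 5 == 1, 37 & 31 == 5,
						 * so word 1 gets bit 5 set */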
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index dc5018a6d9ed..a441aad922c2 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -2876,7 +2876,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
2876 ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0; 2876 ai->wep_capable = (cap_rid.softCap & cpu_to_le16(0x02)) ? 1 : 0;
2877 ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0; 2877 ai->max_wep_idx = (cap_rid.softCap & cpu_to_le16(0x80)) ? 3 : 0;
2878 2878
2879 airo_print_info(dev->name, "Firmware version %x.%x.%02x", 2879 airo_print_info(dev->name, "Firmware version %x.%x.%02d",
2880 ((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF), 2880 ((le16_to_cpu(cap_rid.softVer) >> 8) & 0xF),
2881 (le16_to_cpu(cap_rid.softVer) & 0xFF), 2881 (le16_to_cpu(cap_rid.softVer) & 0xFF),
2882 le16_to_cpu(cap_rid.softSubVer)); 2882 le16_to_cpu(cap_rid.softSubVer));
@@ -3193,19 +3193,26 @@ static void airo_print_status(const char *devname, u16 status)
3193{ 3193{
3194 u8 reason = status & 0xFF; 3194 u8 reason = status & 0xFF;
3195 3195
3196 switch (status) { 3196 switch (status & 0xFF00) {
3197 case STAT_NOBEACON: 3197 case STAT_NOBEACON:
3198 airo_print_dbg(devname, "link lost (missed beacons)"); 3198 switch (status) {
3199 break; 3199 case STAT_NOBEACON:
3200 case STAT_MAXRETRIES: 3200 airo_print_dbg(devname, "link lost (missed beacons)");
3201 case STAT_MAXARL: 3201 break;
3202 airo_print_dbg(devname, "link lost (max retries)"); 3202 case STAT_MAXRETRIES:
3203 break; 3203 case STAT_MAXARL:
3204 case STAT_FORCELOSS: 3204 airo_print_dbg(devname, "link lost (max retries)");
3205 airo_print_dbg(devname, "link lost (local choice)"); 3205 break;
3206 break; 3206 case STAT_FORCELOSS:
3207 case STAT_TSFSYNC: 3207 airo_print_dbg(devname, "link lost (local choice)");
3208 airo_print_dbg(devname, "link lost (TSF sync lost)"); 3208 break;
3209 case STAT_TSFSYNC:
3210 airo_print_dbg(devname, "link lost (TSF sync lost)");
3211 break;
3212 default:
3213 airo_print_dbg(devname, "unknow status %x\n", status);
3214 break;
3215 }
3209 break; 3216 break;
3210 case STAT_DEAUTH: 3217 case STAT_DEAUTH:
3211 airo_print_dbg(devname, "deauthenticated (reason: %d)", reason); 3218 airo_print_dbg(devname, "deauthenticated (reason: %d)", reason);
@@ -3221,7 +3228,11 @@ static void airo_print_status(const char *devname, u16 status)
3221 airo_print_dbg(devname, "authentication failed (reason: %d)", 3228 airo_print_dbg(devname, "authentication failed (reason: %d)",
3222 reason); 3229 reason);
3223 break; 3230 break;
3231 case STAT_ASSOC:
3232 case STAT_REASSOC:
3233 break;
3224 default: 3234 default:
3235 airo_print_dbg(devname, "unknow status %x\n", status);
3225 break; 3236 break;
3226 } 3237 }
3227} 3238}
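The restructuring above dispatches on the status word's high byte first, so an unrecognised code in a known family still lands in that family's inner default. A minimal sketch of the pattern, with a hypothetical family value:

switch (status & 0xFF00) {
case 0x8100:			/* hypothetical "link lost" family */
	switch (status) {
	case 0x8100:		/* exact, known member */
		break;
	default:		/* unknown member of a known family */
		break;
	}
	break;
default:			/* entirely unknown family */
	break;
}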
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 4e7a7fd695c8..0a75be027afa 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -3,7 +3,7 @@ menuconfig ATH_COMMON
3 depends on CFG80211 3 depends on CFG80211
4 ---help--- 4 ---help---
5 This will enable the support for the Atheros wireless drivers. 5 This will enable the support for the Atheros wireless drivers.
6 ath5k, ath9k and ar9170 drivers share some common code, this option 6 ath5k, ath9k, ath9k_htc and ar9170 drivers share some common code, this option
7 enables the common ath.ko module which shares common helpers. 7 enables the common ath.ko module which shares common helpers.
8 8
9 For more information and documentation on this module you can visit: 9 For more information and documentation on this module you can visit:
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
index 826c45e6b274..ec8134b4b949 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ b/drivers/net/wireless/ath/ar9170/cmd.h
@@ -79,7 +79,7 @@ __regwrite_out : \
79 if (__nreg) { \ 79 if (__nreg) { \
80 if (IS_ACCEPTING_CMD(__ar)) \ 80 if (IS_ACCEPTING_CMD(__ar)) \
81 __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \ 81 __err = ar->exec_cmd(__ar, AR9170_CMD_WREG, \
82 8 * __nreg, \ 82 8 * __nreg, \
83 (u8 *) &__ar->cmdbuf[1], \ 83 (u8 *) &__ar->cmdbuf[1], \
84 0, NULL); \ 84 0, NULL); \
85 __nreg = 0; \ 85 __nreg = 0; \
diff --git a/drivers/net/wireless/ath/ar9170/eeprom.h b/drivers/net/wireless/ath/ar9170/eeprom.h
index d2c8cc83f1dd..6c4663883423 100644
--- a/drivers/net/wireless/ath/ar9170/eeprom.h
+++ b/drivers/net/wireless/ath/ar9170/eeprom.h
@@ -127,8 +127,8 @@ struct ar9170_eeprom {
127 __le16 checksum; 127 __le16 checksum;
128 __le16 version; 128 __le16 version;
129 u8 operating_flags; 129 u8 operating_flags;
130#define AR9170_OPFLAG_5GHZ 1 130#define AR9170_OPFLAG_5GHZ 1
131#define AR9170_OPFLAG_2GHZ 2 131#define AR9170_OPFLAG_2GHZ 2
132 u8 misc; 132 u8 misc;
133 __le16 reg_domain[2]; 133 __le16 reg_domain[2];
134 u8 mac_address[6]; 134 u8 mac_address[6];
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 0a1d4c28e68a..06f1f3c951a4 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -425,5 +425,6 @@ enum ar9170_txq {
425 425
426#define AR9170_TXQ_DEPTH 32 426#define AR9170_TXQ_DEPTH 32
427#define AR9170_TX_MAX_PENDING 128 427#define AR9170_TX_MAX_PENDING 128
428#define AR9170_RX_STREAM_MAX_SIZE 65535
428 429
429#endif /* __AR9170_HW_H */ 430#endif /* __AR9170_HW_H */
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c53692980990..0312cee39570 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -236,7 +236,7 @@ static void __ar9170_dump_txqueue(struct ar9170 *ar,
236 wiphy_name(ar->hw->wiphy), skb_queue_len(queue)); 236 wiphy_name(ar->hw->wiphy), skb_queue_len(queue));
237 237
238 skb_queue_walk(queue, skb) { 238 skb_queue_walk(queue, skb) {
239 printk(KERN_DEBUG "index:%d => \n", i++); 239 printk(KERN_DEBUG "index:%d =>\n", i++);
240 ar9170_print_txheader(ar, skb); 240 ar9170_print_txheader(ar, skb);
241 } 241 }
242 if (i != skb_queue_len(queue)) 242 if (i != skb_queue_len(queue))
@@ -281,7 +281,7 @@ static void ar9170_dump_tx_status_ampdu(struct ar9170 *ar)
281 unsigned long flags; 281 unsigned long flags;
282 282
283 spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags); 283 spin_lock_irqsave(&ar->tx_status_ampdu.lock, flags);
284 printk(KERN_DEBUG "%s: A-MPDU tx_status queue => \n", 284 printk(KERN_DEBUG "%s: A-MPDU tx_status queue =>\n",
285 wiphy_name(ar->hw->wiphy)); 285 wiphy_name(ar->hw->wiphy));
286 __ar9170_dump_txqueue(ar, &ar->tx_status_ampdu); 286 __ar9170_dump_txqueue(ar, &ar->tx_status_ampdu);
287 spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags); 287 spin_unlock_irqrestore(&ar->tx_status_ampdu.lock, flags);
@@ -308,7 +308,7 @@ static void ar9170_recycle_expired(struct ar9170 *ar,
308 if (time_is_before_jiffies(arinfo->timeout)) { 308 if (time_is_before_jiffies(arinfo->timeout)) {
309#ifdef AR9170_QUEUE_DEBUG 309#ifdef AR9170_QUEUE_DEBUG
310 printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => " 310 printk(KERN_DEBUG "%s: [%ld > %ld] frame expired => "
311 "recycle \n", wiphy_name(ar->hw->wiphy), 311 "recycle\n", wiphy_name(ar->hw->wiphy),
312 jiffies, arinfo->timeout); 312 jiffies, arinfo->timeout);
313 ar9170_print_txheader(ar, skb); 313 ar9170_print_txheader(ar, skb);
314#endif /* AR9170_QUEUE_DEBUG */ 314#endif /* AR9170_QUEUE_DEBUG */
@@ -689,7 +689,8 @@ void ar9170_handle_command_response(struct ar9170 *ar, void *buf, u32 len)
689 689
690 /* firmware debug */ 690 /* firmware debug */
691 case 0xca: 691 case 0xca:
692 printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4, (char *)buf + 4); 692 printk(KERN_DEBUG "ar9170 FW: %.*s\n", len - 4,
693 (char *)buf + 4);
693 break; 694 break;
694 case 0xcb: 695 case 0xcb:
695 len -= 4; 696 len -= 4;
@@ -1728,7 +1729,7 @@ static void ar9170_tx(struct ar9170 *ar)
1728 printk(KERN_DEBUG "%s: queue %d full\n", 1729 printk(KERN_DEBUG "%s: queue %d full\n",
1729 wiphy_name(ar->hw->wiphy), i); 1730 wiphy_name(ar->hw->wiphy), i);
1730 1731
1731 printk(KERN_DEBUG "%s: stuck frames: ===> \n", 1732 printk(KERN_DEBUG "%s: stuck frames: ===>\n",
1732 wiphy_name(ar->hw->wiphy)); 1733 wiphy_name(ar->hw->wiphy));
1733 ar9170_dump_txqueue(ar, &ar->tx_pending[i]); 1734 ar9170_dump_txqueue(ar, &ar->tx_pending[i]);
1734 ar9170_dump_txqueue(ar, &ar->tx_status[i]); 1735 ar9170_dump_txqueue(ar, &ar->tx_status[i]);
@@ -2046,21 +2047,17 @@ out:
2046 return err; 2047 return err;
2047} 2048}
2048 2049
2049static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, 2050static u64 ar9170_op_prepare_multicast(struct ieee80211_hw *hw,
2050 struct dev_addr_list *mclist) 2051 struct netdev_hw_addr_list *mc_list)
2051{ 2052{
2052 u64 mchash; 2053 u64 mchash;
2053 int i; 2054 struct netdev_hw_addr *ha;
2054 2055
2055 /* always get broadcast frames */ 2056 /* always get broadcast frames */
2056 mchash = 1ULL << (0xff >> 2); 2057 mchash = 1ULL << (0xff >> 2);
2057 2058
2058 for (i = 0; i < mc_count; i++) { 2059 netdev_hw_addr_list_for_each(ha, mc_list)
2059 if (WARN_ON(!mclist)) 2060 mchash |= 1ULL << (ha->addr[5] >> 2);
2060 break;
2061 mchash |= 1ULL << (mclist->dmi_addr[5] >> 2);
2062 mclist = mclist->next;
2063 }
2064 2061
2065 return mchash; 2062 return mchash;
2066} 2063}
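Here the hardware hash keys on the top six bits of the last address byte, packed into a single u64. A worked example with an illustrative address ending in 0x4e:

u64 mchash = 1ULL << (0xff >> 2);	/* bit 63: always accept broadcast */
u8 last = 0x4e;				/* illustrative ha->addr[5] */

mchash |= 1ULL << (last >> 2);		/* 0x4e >> 2 == 19 -> set bit 19 */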
@@ -2516,7 +2513,7 @@ void *ar9170_alloc(size_t priv_size)
2516 * tends to split the streams into separate rx descriptors. 2513 * tends to split the streams into separate rx descriptors.
2517 */ 2514 */
2518 2515
2519 skb = __dev_alloc_skb(AR9170_MAX_RX_BUFFER_SIZE, GFP_KERNEL); 2516 skb = __dev_alloc_skb(AR9170_RX_STREAM_MAX_SIZE, GFP_KERNEL);
2520 if (!skb) 2517 if (!skb)
2521 goto err_nomem; 2518 goto err_nomem;
2522 2519
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 99a6da464bd3..c1c7c427501c 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -67,18 +67,28 @@ static struct usb_device_id ar9170_usb_ids[] = {
67 { USB_DEVICE(0x0cf3, 0x1001) }, 67 { USB_DEVICE(0x0cf3, 0x1001) },
68 /* TP-Link TL-WN821N v2 */ 68 /* TP-Link TL-WN821N v2 */
69 { USB_DEVICE(0x0cf3, 0x1002) }, 69 { USB_DEVICE(0x0cf3, 0x1002) },
70 /* 3Com Dual Band 802.11n USB Adapter */
71 { USB_DEVICE(0x0cf3, 0x1010) },
72 /* H3C Dual Band 802.11n USB Adapter */
73 { USB_DEVICE(0x0cf3, 0x1011) },
70 /* Cace Airpcap NX */ 74 /* Cace Airpcap NX */
71 { USB_DEVICE(0xcace, 0x0300) }, 75 { USB_DEVICE(0xcace, 0x0300) },
72 /* D-Link DWA 160 A1 */ 76 /* D-Link DWA 160 A1 */
73 { USB_DEVICE(0x07d1, 0x3c10) }, 77 { USB_DEVICE(0x07d1, 0x3c10) },
74 /* D-Link DWA 160 A2 */ 78 /* D-Link DWA 160 A2 */
75 { USB_DEVICE(0x07d1, 0x3a09) }, 79 { USB_DEVICE(0x07d1, 0x3a09) },
80 /* Netgear WNA1000 */
81 { USB_DEVICE(0x0846, 0x9040) },
76 /* Netgear WNDA3100 */ 82 /* Netgear WNDA3100 */
77 { USB_DEVICE(0x0846, 0x9010) }, 83 { USB_DEVICE(0x0846, 0x9010) },
78 /* Netgear WN111 v2 */ 84 /* Netgear WN111 v2 */
79 { USB_DEVICE(0x0846, 0x9001) }, 85 { USB_DEVICE(0x0846, 0x9001) },
80 /* Zydas ZD1221 */ 86 /* Zydas ZD1221 */
81 { USB_DEVICE(0x0ace, 0x1221) }, 87 { USB_DEVICE(0x0ace, 0x1221) },
88 /* Proxim ORiNOCO 802.11n USB */
89 { USB_DEVICE(0x1435, 0x0804) },
90 /* WNC Generic 11n USB Dongle */
91 { USB_DEVICE(0x1435, 0x0326) },
82 /* ZyXEL NWD271N */ 92 /* ZyXEL NWD271N */
83 { USB_DEVICE(0x0586, 0x3417) }, 93 { USB_DEVICE(0x0586, 0x3417) },
84 /* Z-Com UB81 BG */ 94 /* Z-Com UB81 BG */
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 71fc960814f0..1fbf6b1f9a7e 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -48,6 +48,12 @@ enum ath_device_state {
48 ATH_HW_INITIALIZED, 48 ATH_HW_INITIALIZED,
49}; 49};
50 50
51enum ath_bus_type {
52 ATH_PCI,
53 ATH_AHB,
54 ATH_USB,
55};
56
51struct reg_dmn_pair_mapping { 57struct reg_dmn_pair_mapping {
52 u16 regDmnEnum; 58 u16 regDmnEnum;
53 u16 reg_5ghz_ctl; 59 u16 reg_5ghz_ctl;
@@ -73,9 +79,10 @@ struct ath_ops {
73struct ath_common; 79struct ath_common;
74 80
75struct ath_bus_ops { 81struct ath_bus_ops {
76 void (*read_cachesize)(struct ath_common *common, int *csz); 82 enum ath_bus_type ath_bus_type;
77 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data); 83 void (*read_cachesize)(struct ath_common *common, int *csz);
78 void (*bt_coex_prep)(struct ath_common *common); 84 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
85 void (*bt_coex_prep)(struct ath_common *common);
79}; 86};
80 87
81struct ath_common { 88struct ath_common {
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 090dc6d268a3..cc09595b781a 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -12,5 +12,6 @@ ath5k-y += attach.o
12ath5k-y += base.o 12ath5k-y += base.o
13ath5k-y += led.o 13ath5k-y += led.o
14ath5k-y += rfkill.o 14ath5k-y += rfkill.o
15ath5k-y += ani.o
15ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o 16ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
16obj-$(CONFIG_ATH5K) += ath5k.o 17obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
new file mode 100644
index 000000000000..584a32859bdb
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -0,0 +1,744 @@
1/*
2 * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath5k.h"
18#include "base.h"
19#include "reg.h"
20#include "debug.h"
21#include "ani.h"
22
23/**
24 * DOC: Basic ANI Operation
25 *
26 * Adaptive Noise Immunity (ANI) controls five noise immunity parameters
27 * depending on the amount of interference in the environment, increasing
28 * or reducing sensitivity as necessary.
29 *
30 * The parameters are:
31 * - "noise immunity"
32 * - "spur immunity"
33 * - "firstep level"
34 * - "OFDM weak signal detection"
35 * - "CCK weak signal detection"
36 *
 37 * Basically we look at the number of OFDM and CCK timing errors we get and then
38 * raise or lower immunity accordingly by setting one or more of these
39 * parameters.
40 * Newer chipsets have PHY error counters in hardware which will generate a MIB
 41 * interrupt when they overflow. Older hardware has to enable PHY error frames
 42 * by setting an RX flag and then count every single PHY error. When a specified
43 * threshold of errors has been reached we will raise immunity.
44 * Also we regularly check the amount of errors and lower or raise immunity as
45 * necessary.
46 */
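A compressed sketch of the loop this file implements; too_many_phy_errors(), ofdm_errors_dominate() and quiet_for_a_while() are stand-ins for the threshold checks actually done in ath5k_ani_calibration() further below:

/* Pseudo-structure of the ANI loop, in the order the DOC above describes. */
static void ani_loop_sketch(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
	if (too_many_phy_errors(as))		/* OFDM/CCK counters high */
		ath5k_ani_raise_immunity(ah, as, ofdm_errors_dominate(as));
	else if (quiet_for_a_while(as))		/* few errors over ~5 periods */
		ath5k_ani_lower_immunity(ah, as);
}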
47
48
49/*** ANI parameter control ***/
50
51/**
52 * ath5k_ani_set_noise_immunity_level() - Set noise immunity level
53 *
54 * @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
55 */
56void
57ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
58{
59 /* TODO:
60 * ANI documents suggest the following five levels to use, but the HAL
 61	 * and ath9k only use the last two levels, making this
 62	 * essentially an on/off option. There *may* be a reason for this (???),
 63	 * so I stick with the HAL version for now...
64 */
65#if 0
66 const s8 hi[] = { -18, -18, -16, -14, -12 };
67 const s8 lo[] = { -52, -56, -60, -64, -70 };
68 const s8 sz[] = { -34, -41, -48, -55, -62 };
69 const s8 fr[] = { -70, -72, -75, -78, -80 };
70#else
71 const s8 sz[] = { -55, -62 };
72 const s8 lo[] = { -64, -70 };
73 const s8 hi[] = { -14, -12 };
74 const s8 fr[] = { -78, -80 };
75#endif
76 if (level < 0 || level > ARRAY_SIZE(sz)) {
77 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
78 "level out of range %d", level);
79 return;
80 }
81
82 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
83 AR5K_PHY_DESIRED_SIZE_TOT, sz[level]);
84 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
85 AR5K_PHY_AGCCOARSE_LO, lo[level]);
86 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
87 AR5K_PHY_AGCCOARSE_HI, hi[level]);
88 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
89 AR5K_PHY_SIG_FIRPWR, fr[level]);
90
91 ah->ah_sc->ani_state.noise_imm_level = level;
92 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
93}
94
95
96/**
97 * ath5k_ani_set_spur_immunity_level() - Set spur immunity level
98 *
99 * @level: level between 0 and @max_spur_level (the maximum level is dependent
100 * on the chip revision).
101 */
102void
103ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
104{
105 const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
106
107 if (level < 0 || level > ARRAY_SIZE(val) ||
108 level > ah->ah_sc->ani_state.max_spur_level) {
109 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
110 "level out of range %d", level);
111 return;
112 }
113
114 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
115 AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
116
117 ah->ah_sc->ani_state.spur_level = level;
118 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
119}
120
121
122/**
123 * ath5k_ani_set_firstep_level() - Set "firstep" level
124 *
125 * @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
126 */
127void
128ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
129{
130 const int val[] = { 0, 4, 8 };
131
132 if (level < 0 || level > ARRAY_SIZE(val)) {
133 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
134 "level out of range %d", level);
135 return;
136 }
137
138 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
139 AR5K_PHY_SIG_FIRSTEP, val[level]);
140
141 ah->ah_sc->ani_state.firstep_level = level;
142 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "new level %d", level);
143}
144
145
146/**
147 * ath5k_ani_set_ofdm_weak_signal_detection() - Control OFDM weak signal
148 * detection
149 *
150 * @on: turn on or off
151 */
152void
153ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
154{
155 const int m1l[] = { 127, 50 };
156 const int m2l[] = { 127, 40 };
157 const int m1[] = { 127, 0x4d };
158 const int m2[] = { 127, 0x40 };
159 const int m2cnt[] = { 31, 16 };
160 const int m2lcnt[] = { 63, 48 };
161
162 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
163 AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
164 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
165 AR5K_PHY_WEAK_OFDM_LOW_THR_M2, m2l[on]);
166 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
167 AR5K_PHY_WEAK_OFDM_HIGH_THR_M1, m1[on]);
168 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
169 AR5K_PHY_WEAK_OFDM_HIGH_THR_M2, m2[on]);
170 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
171 AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT, m2cnt[on]);
172 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
173 AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT, m2lcnt[on]);
174
175 if (on)
176 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
177 AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
178 else
179 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
180 AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
181
182 ah->ah_sc->ani_state.ofdm_weak_sig = on;
183 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
184 on ? "on" : "off");
185}
186
187
188/**
189 * ath5k_ani_set_cck_weak_signal_detection() - control CCK weak signal detection
190 *
191 * @on: turn on or off
192 */
193void
194ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
195{
196 const int val[] = { 8, 6 };
197 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
198 AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
199 ah->ah_sc->ani_state.cck_weak_sig = on;
200 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "turned %s",
201 on ? "on" : "off");
202}
203
204
205/*** ANI algorithm ***/
206
207/**
208 * ath5k_ani_raise_immunity() - Increase noise immunity
209 *
210 * @ofdm_trigger: If this is true we are called because of too many OFDM errors,
211 * the algorithm will tune more parameters then.
212 *
213 * Try to raise noise immunity (=decrease sensitivity) in several steps
214 * depending on the average RSSI of the beacons we received.
215 */
216static void
217ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
218 bool ofdm_trigger)
219{
220 int rssi = ah->ah_beacon_rssi_avg.avg;
221
222 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
223 ofdm_trigger ? "ODFM" : "CCK");
224
225 /* first: raise noise immunity */
226 if (as->noise_imm_level < ATH5K_ANI_MAX_NOISE_IMM_LVL) {
227 ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level + 1);
228 return;
229 }
230
231 /* only OFDM: raise spur immunity level */
232 if (ofdm_trigger &&
233 as->spur_level < ah->ah_sc->ani_state.max_spur_level) {
234 ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
235 return;
236 }
237
238 /* AP mode */
239 if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
240 if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
241 ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
242 return;
243 }
244
245 /* STA and IBSS mode */
246
247 /* TODO: for IBSS mode it would be better to keep a beacon RSSI average
248	 * per neighbour node and use the minimum of these, to make sure we
249 * don't shut out a remote node by raising immunity too high. */
250
251 if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
252 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
253 "beacon RSSI high");
254		/* only OFDM: beacon RSSI is high, we can disable OFDM weak
255 * signal detection */
256 if (ofdm_trigger && as->ofdm_weak_sig == true) {
257 ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
258 ath5k_ani_set_spur_immunity_level(ah, 0);
259 return;
260 }
261 /* as a last resort or CCK: raise firstep level */
262 if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) {
263 ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
264 return;
265 }
266 } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
267 /* beacon RSSI in mid range, we need OFDM weak signal detect,
268 * but can raise firstep level */
269 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
270 "beacon RSSI mid");
271 if (ofdm_trigger && as->ofdm_weak_sig == false)
272 ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
273 if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
274 ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
275 return;
276 } else if (ah->ah_current_channel->band == IEEE80211_BAND_2GHZ) {
277		/* beacon RSSI is low. In B/G mode, turn off OFDM weak signal
278 * detect and zero firstep level to maximize CCK sensitivity */
279 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
280 "beacon RSSI low, 2GHz");
281 if (ofdm_trigger && as->ofdm_weak_sig == true)
282 ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
283 if (as->firstep_level > 0)
284 ath5k_ani_set_firstep_level(ah, 0);
285 return;
286 }
287
288 /* TODO: why not?:
289 if (as->cck_weak_sig == true) {
290 ath5k_ani_set_cck_weak_signal_detection(ah, false);
291 }
292 */
293}
294
295
296/**
297 * ath5k_ani_lower_immunity() - Decrease noise immunity
298 *
299 * Try to lower noise immunity (=increase sensitivity) in several steps
300 * depending on the average RSSI of the beacons we received.
301 */
302static void
303ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
304{
305 int rssi = ah->ah_beacon_rssi_avg.avg;
306
307 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
308
309 if (ah->ah_sc->opmode == NL80211_IFTYPE_AP) {
310 /* AP mode */
311 if (as->firstep_level > 0) {
312 ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
313 return;
314 }
315 } else {
316 /* STA and IBSS mode (see TODO above) */
317 if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
318 /* beacon signal is high, leave OFDM weak signal
319 * detection off or it may oscillate
320 * TODO: who said it's off??? */
321 } else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
322			/* beacon RSSI is mid-range: turn on OFDM weak signal
323 * detection and next, lower firstep level */
324 if (as->ofdm_weak_sig == false) {
325 ath5k_ani_set_ofdm_weak_signal_detection(ah,
326 true);
327 return;
328 }
329 if (as->firstep_level > 0) {
330 ath5k_ani_set_firstep_level(ah,
331 as->firstep_level - 1);
332 return;
333 }
334 } else {
335 /* beacon signal is low: only reduce firstep level */
336 if (as->firstep_level > 0) {
337 ath5k_ani_set_firstep_level(ah,
338 as->firstep_level - 1);
339 return;
340 }
341 }
342 }
343
344 /* all modes */
345 if (as->spur_level > 0) {
346 ath5k_ani_set_spur_immunity_level(ah, as->spur_level - 1);
347 return;
348 }
349
350 /* finally, reduce noise immunity */
351 if (as->noise_imm_level > 0) {
352 ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level - 1);
353 return;
354 }
355}
356
357
358/**
359 * ath5k_hw_ani_get_listen_time() - Calculate time spent listening
360 *
361 * Return an approximation of the time spent "listening" in milliseconds (ms)
362 * since the last call of this function by deducting the cycles spent
363 * transmitting and receiving from the total cycle count.
364 * Save profile count values for debugging/statistics and because we might want
365 * to use them later.
366 *
367 * We assume no one else clears these registers!
368 */
369static int
370ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
371{
372 int listen;
373
374 /* freeze */
375 ath5k_hw_reg_write(ah, AR5K_MIBC_FMC, AR5K_MIBC);
376 /* read */
377 as->pfc_cycles = ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE);
378 as->pfc_busy = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR);
379 as->pfc_tx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX);
380 as->pfc_rx = ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX);
381 /* clear */
382 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
383 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
384 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
385 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
386 /* un-freeze */
387 ath5k_hw_reg_write(ah, 0, AR5K_MIBC);
388
389 /* TODO: where does 44000 come from? (11g clock rate?) */
390 listen = (as->pfc_cycles - as->pfc_rx - as->pfc_tx) / 44000;
391
392 if (as->pfc_cycles == 0 || listen < 0)
393 return 0;
394 return listen;
395}
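For a feel of the scale, a worked example with illustrative counter values under the function's own 44000-cycles-per-ms divisor:

/* Illustrative numbers only; the counters are read from the profile regs. */
int cycles = 44000000, rx = 8800000, tx = 4400000;
int listen = (cycles - rx - tx) / 44000;	/* = 700 ms spent listening */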
396
397
398/**
399 * ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
400 *
401 * Clear the PHY error counters as soon as possible, since this might be called
402 * from a MIB interrupt and we want to make sure we don't get interrupted again.
403 * Add the count of CCK and OFDM errors to our internal state, so it can be used
404 * by the algorithm later.
405 *
406 * Will be called from interrupt and tasklet context.
407 * Returns 0 if both counters are zero.
408 */
409static int
410ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
411 struct ath5k_ani_state *as)
412{
413 unsigned int ofdm_err, cck_err;
414
415 if (!ah->ah_capabilities.cap_has_phyerr_counters)
416 return 0;
417
418 ofdm_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1);
419 cck_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2);
420
421 /* reset counters first, we might be in a hurry (interrupt) */
422 ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
423 AR5K_PHYERR_CNT1);
424 ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
425 AR5K_PHYERR_CNT2);
426
427 ofdm_err = ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ofdm_err);
428 cck_err = ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - cck_err);
429
430	/* sometimes both can be zero, especially when there is a superfluous
431	 * second interrupt. Detect that here and return 0. */
432 if (ofdm_err <= 0 && cck_err <= 0)
433 return 0;
434
435 /* avoid negative values should one of the registers overflow */
436 if (ofdm_err > 0) {
437 as->ofdm_errors += ofdm_err;
438 as->sum_ofdm_errors += ofdm_err;
439 }
440 if (cck_err > 0) {
441 as->cck_errors += cck_err;
442 as->sum_cck_errors += cck_err;
443 }
444 return 1;
445}
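The write-back values here mirror the preload trick used by ath5k_enable_phy_err_counters() further below: the counter starts at CNT_MAX - TRIG_HIGH, so it overflows (raising a MIB interrupt) after exactly TRIG_HIGH errors, and the read-back converts the raw value back into an error count. A worked example with made-up register numbers:

/* Say CNT_MAX were 0xFFFF and the OFDM high trigger 500 (illustrative). */
unsigned int raw = 0xFE6B;			/* counter value read back */
unsigned int err = 500 - (0xFFFF - raw);	/* 500 - 404 = 96 errors */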
446
447
448/**
449 * ath5k_ani_period_restart() - Restart ANI period
450 *
451 * Just reset counters, so they are clear for the next "ani period".
452 */
453static void
454ath5k_ani_period_restart(struct ath5k_hw *ah, struct ath5k_ani_state *as)
455{
456 /* keep last values for debugging */
457 as->last_ofdm_errors = as->ofdm_errors;
458 as->last_cck_errors = as->cck_errors;
459 as->last_listen = as->listen_time;
460
461 as->ofdm_errors = 0;
462 as->cck_errors = 0;
463 as->listen_time = 0;
464}
465
466
467/**
468 * ath5k_ani_calibration() - The main ANI calibration function
469 *
470 * We count OFDM and CCK errors relative to the time where we did not send or
471 * receive ("listen" time) and raise or lower immunity accordingly.
472 * This is called regularly (every second) from the calibration timer, but also
473 * when an error threshold has been reached.
474 *
475 * In order to synchronize access from different contexts, this should be
476 * called only indirectly by scheduling the ANI tasklet!
477 */
478void
479ath5k_ani_calibration(struct ath5k_hw *ah)
480{
481 struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
482 int listen, ofdm_high, ofdm_low, cck_high, cck_low;
483
484 if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
485 return;
486
487 /* get listen time since last call and add it to the counter because we
488 * might not have restarted the "ani period" last time */
489 listen = ath5k_hw_ani_get_listen_time(ah, as);
490 as->listen_time += listen;
491
492 ath5k_ani_save_and_clear_phy_errors(ah, as);
493
494 ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
495 cck_high = as->listen_time * ATH5K_ANI_CCK_TRIG_HIGH / 1000;
496 ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
497 cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
498
499 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
500 "listen %d (now %d)", as->listen_time, listen);
501 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
502 "check high ofdm %d/%d cck %d/%d",
503 as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
504
505 if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
506 /* too many PHY errors - we have to raise immunity */
507 bool ofdm_flag = as->ofdm_errors > ofdm_high ? true : false;
508 ath5k_ani_raise_immunity(ah, as, ofdm_flag);
509 ath5k_ani_period_restart(ah, as);
510
511 } else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
512 /* If more than 5 (TODO: why 5?) periods have passed and we got
513 * relatively little errors we can try to lower immunity */
514 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
515 "check low ofdm %d/%d cck %d/%d",
516 as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
517
518 if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
519 ath5k_ani_lower_immunity(ah, as);
520
521 ath5k_ani_period_restart(ah, as);
522 }
523}
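The thresholds above scale linearly with listen time, i.e. the trigger constants are errors-per-second figures. A worked example, assuming an OFDM high trigger of 500 purely for illustration:

int listen_time = 2000;				/* ms listened this period */
int ofdm_high = listen_time * 500 / 1000;	/* = 1000 errors tolerated */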
524
525
526/*** INTERRUPT HANDLER ***/
527
528/**
529 * ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
530 *
531 * Just read & reset the registers quickly, so they don't generate more
532 * interrupts, save the counters and schedule the tasklet to decide whether
533 * to raise immunity or not.
534 *
535 * We just need to handle PHY error counters, ath5k_hw_update_mib_counters()
536 * should take care of all "normal" MIB interrupts.
537 */
538void
539ath5k_ani_mib_intr(struct ath5k_hw *ah)
540{
541 struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
542
543 /* nothing to do here if HW does not have PHY error counters - they
544 * can't be the reason for the MIB interrupt then */
545 if (!ah->ah_capabilities.cap_has_phyerr_counters)
546 return;
547
548	/* not in use but clear anyway */
549 ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
550 ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
551
552 if (ah->ah_sc->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
553 return;
554
555 /* if one of the errors triggered, we can get a superfluous second
556 * interrupt, even though we have already reset the register. the
557 * function detects that so we can return early */
558 if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
559 return;
560
561 if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
562 as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
563 tasklet_schedule(&ah->ah_sc->ani_tasklet);
564}
565
566
567/**
568 * ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
569 *
570 * This is used by hardware without PHY error counters to report PHY errors
571 * on a frame-by-frame basis, instead of the interrupt.
572 */
573void
574ath5k_ani_phy_error_report(struct ath5k_hw *ah,
575 enum ath5k_phy_error_code phyerr)
576{
577 struct ath5k_ani_state *as = &ah->ah_sc->ani_state;
578
579 if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
580 as->ofdm_errors++;
581 if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
582 tasklet_schedule(&ah->ah_sc->ani_tasklet);
583 } else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
584 as->cck_errors++;
585 if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
586 tasklet_schedule(&ah->ah_sc->ani_tasklet);
587 }
588}
589
590
591/*** INIT ***/
592
593/**
594 * ath5k_enable_phy_err_counters() - Enable PHY error counters
595 *
596 * Enable PHY error counters for OFDM and CCK timing errors.
597 */
598static void
599ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
600{
601 ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
602 AR5K_PHYERR_CNT1);
603 ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
604 AR5K_PHYERR_CNT2);
605 ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_OFDM, AR5K_PHYERR_CNT1_MASK);
606 ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_CCK, AR5K_PHYERR_CNT2_MASK);
607
608 /* not in use */
609 ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
610 ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
611}
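
The preload values written to AR5K_PHYERR_CNT1/2 are what arm the interrupt: the hardware signals AR5K_INT_MIB when a counter reaches its maximum, so starting the counter below the maximum by exactly the trigger level makes the interrupt fire after that many errors. Worked through for OFDM:

/* preload           = ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH
 * errors until intr = ATH5K_PHYERR_CNT_MAX - preload
 *                   = ATH5K_ANI_OFDM_TRIG_HIGH   (= 500, see ani.h)
 */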
612
613
614/**
615 * ath5k_disable_phy_err_counters() - Disable PHY error counters
616 *
617 * Disable PHY error counters for OFDM and CCK timing errors.
618 */
619static void
620ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
621{
622 ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1);
623 ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2);
624 ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1_MASK);
625 ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2_MASK);
626
627 /* not in use */
628 ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
629 ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
630}
631
632
633/**
634 * ath5k_ani_init() - Initialize ANI
635 * @mode: Which mode to use (auto, manual high, manual low, off)
636 *
637 * Initialize ANI according to mode.
638 */
639void
640ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
641{
642 /* ANI is only possible on 5212 and newer */
643 if (ah->ah_version < AR5K_AR5212)
644 return;
645
646 /* clear old state information */
647 memset(&ah->ah_sc->ani_state, 0, sizeof(ah->ah_sc->ani_state));
648
649 /* older hardware has more spur levels than newer */
650 if (ah->ah_mac_srev < AR5K_SREV_AR2414)
651 ah->ah_sc->ani_state.max_spur_level = 7;
652 else
653 ah->ah_sc->ani_state.max_spur_level = 2;
654
655 /* initial values for our ani parameters */
656 if (mode == ATH5K_ANI_MODE_OFF) {
657 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI off\n");
658 } else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
659 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
660 "ANI manual low -> high sensitivity\n");
661 ath5k_ani_set_noise_immunity_level(ah, 0);
662 ath5k_ani_set_spur_immunity_level(ah, 0);
663 ath5k_ani_set_firstep_level(ah, 0);
664 ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
665 ath5k_ani_set_cck_weak_signal_detection(ah, true);
666 } else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
667 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI,
668 "ANI manual high -> low sensitivity\n");
669 ath5k_ani_set_noise_immunity_level(ah,
670 ATH5K_ANI_MAX_NOISE_IMM_LVL);
671 ath5k_ani_set_spur_immunity_level(ah,
672 ah->ah_sc->ani_state.max_spur_level);
673 ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
674 ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
675 ath5k_ani_set_cck_weak_signal_detection(ah, false);
676 } else if (mode == ATH5K_ANI_MODE_AUTO) {
677 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "ANI auto\n");
678 ath5k_ani_set_noise_immunity_level(ah, 0);
679 ath5k_ani_set_spur_immunity_level(ah, 0);
680 ath5k_ani_set_firstep_level(ah, 0);
681 ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
682 ath5k_ani_set_cck_weak_signal_detection(ah, false);
683 }
684
685	/* newer hardware has PHY error counter registers which we can use to
686	 * get OFDM and CCK error counts. older hardware has to set the RX
687	 * filter and report every single PHY error by calling
688	 * ath5k_ani_phy_error_report() */
689 if (mode == ATH5K_ANI_MODE_AUTO) {
690 if (ah->ah_capabilities.cap_has_phyerr_counters)
691 ath5k_enable_phy_err_counters(ah);
692 else
693 ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) |
694 AR5K_RX_FILTER_PHYERR);
695 } else {
696 if (ah->ah_capabilities.cap_has_phyerr_counters)
697 ath5k_disable_phy_err_counters(ah);
698 else
699 ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) &
700 ~AR5K_RX_FILTER_PHYERR);
701 }
702
703 ah->ah_sc->ani_state.ani_mode = mode;
704}
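
A hedged usage sketch: after a chip reset, the driver can restore whatever ANI mode was active by re-running the init with the saved mode (the mode survives in ani_state.ani_mode, which this function sets last). The function name here is hypothetical:

/* Sketch, not part of the patch: restore ANI after a reset. */
static void example_restore_ani(struct ath5k_softc *sc)
{
	ath5k_ani_init(sc->ah, sc->ani_state.ani_mode);
}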
705
706
707/*** DEBUG ***/
708
709#ifdef CONFIG_ATH5K_DEBUG
710
711void
712ath5k_ani_print_counters(struct ath5k_hw *ah)
713{
714 /* clears too */
715 printk(KERN_NOTICE "ACK fail\t%d\n",
716 ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
717 printk(KERN_NOTICE "RTS fail\t%d\n",
718 ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
719 printk(KERN_NOTICE "RTS success\t%d\n",
720 ath5k_hw_reg_read(ah, AR5K_RTS_OK));
721 printk(KERN_NOTICE "FCS error\t%d\n",
722 ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
723
724 /* no clear */
725 printk(KERN_NOTICE "tx\t%d\n",
726 ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
727 printk(KERN_NOTICE "rx\t%d\n",
728 ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
729 printk(KERN_NOTICE "busy\t%d\n",
730 ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
731 printk(KERN_NOTICE "cycles\t%d\n",
732 ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
733
734 printk(KERN_NOTICE "AR5K_PHYERR_CNT1\t%d\n",
735 ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
736 printk(KERN_NOTICE "AR5K_PHYERR_CNT2\t%d\n",
737 ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
738 printk(KERN_NOTICE "AR5K_OFDM_FIL_CNT\t%d\n",
739 ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
740 printk(KERN_NOTICE "AR5K_CCK_FIL_CNT\t%d\n",
741 ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
742}
743
744#endif
diff --git a/drivers/net/wireless/ath/ath5k/ani.h b/drivers/net/wireless/ath/ath5k/ani.h
new file mode 100644
index 000000000000..55cf26d8522c
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ani.h
@@ -0,0 +1,104 @@
1/*
2 * Copyright (C) 2010 Bruno Randolf <br1@einfach.org>
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16#ifndef ANI_H
17#define ANI_H
18
19/* these thresholds are relative to the ATH5K_ANI_LISTEN_PERIOD */
20#define ATH5K_ANI_LISTEN_PERIOD 100
21#define ATH5K_ANI_OFDM_TRIG_HIGH 500
22#define ATH5K_ANI_OFDM_TRIG_LOW 200
23#define ATH5K_ANI_CCK_TRIG_HIGH 200
24#define ATH5K_ANI_CCK_TRIG_LOW 100
25
26/* average beacon RSSI thresholds */
27#define ATH5K_ANI_RSSI_THR_HIGH 40
28#define ATH5K_ANI_RSSI_THR_LOW 7
29
30/* maximum available levels */
31#define ATH5K_ANI_MAX_FIRSTEP_LVL 2
32#define ATH5K_ANI_MAX_NOISE_IMM_LVL 1
33
34
35/**
36 * enum ath5k_ani_mode - mode for ANI / noise sensitivity
37 *
38 * @ATH5K_ANI_MODE_OFF: Turn ANI off. This can be useful to just stop the ANI
39 * algorithm after it has been running in auto mode.
40 * @ATH5K_ANI_MODE_MANUAL_LOW: Manually set all immunity parameters to low,
41 * maximizing sensitivity. ANI will not run.
42 * @ATH5K_ANI_MODE_MANUAL_HIGH: Manually set all immunity parameters to high,
43 * minimizing sensitivity. ANI will not run.
44 * @ATH5K_ANI_MODE_AUTO: Automatically control immunity parameters based on the
45 * amount of OFDM and CCK frame errors (default).
46 */
47enum ath5k_ani_mode {
48 ATH5K_ANI_MODE_OFF = 0,
49 ATH5K_ANI_MODE_MANUAL_LOW = 1,
50 ATH5K_ANI_MODE_MANUAL_HIGH = 2,
51 ATH5K_ANI_MODE_AUTO = 3
52};
53
54
55/**
56 * struct ath5k_ani_state - ANI state and associated counters
57 *
58 * @max_spur_level: the maximum spur level is chip dependent
59 */
60struct ath5k_ani_state {
61 enum ath5k_ani_mode ani_mode;
62
63 /* state */
64 int noise_imm_level;
65 int spur_level;
66 int firstep_level;
67 bool ofdm_weak_sig;
68 bool cck_weak_sig;
69
70 int max_spur_level;
71
72 /* used by the algorithm */
73 unsigned int listen_time;
74 unsigned int ofdm_errors;
75 unsigned int cck_errors;
76
77 /* debug/statistics only: numbers from last ANI calibration */
78 unsigned int pfc_tx;
79 unsigned int pfc_rx;
80 unsigned int pfc_busy;
81 unsigned int pfc_cycles;
82 unsigned int last_listen;
83 unsigned int last_ofdm_errors;
84 unsigned int last_cck_errors;
85 unsigned int sum_ofdm_errors;
86 unsigned int sum_cck_errors;
87};
88
89void ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode);
90void ath5k_ani_mib_intr(struct ath5k_hw *ah);
91void ath5k_ani_calibration(struct ath5k_hw *ah);
92void ath5k_ani_phy_error_report(struct ath5k_hw *ah,
93 enum ath5k_phy_error_code phyerr);
94
95/* for manual control */
96void ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level);
97void ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level);
98void ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level);
99void ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on);
100void ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on);
101
102void ath5k_ani_print_counters(struct ath5k_hw *ah);
103
104#endif /* ANI_H */
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ac67f02e26d8..2785946f659a 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -202,7 +202,8 @@
202#define AR5K_TUNE_MAX_TXPOWER 63 202#define AR5K_TUNE_MAX_TXPOWER 63
203#define AR5K_TUNE_DEFAULT_TXPOWER 25 203#define AR5K_TUNE_DEFAULT_TXPOWER 25
204#define AR5K_TUNE_TPC_TXPOWER false 204#define AR5K_TUNE_TPC_TXPOWER false
205#define AR5K_TUNE_HWTXTRIES 4 205#define ATH5K_TUNE_CALIBRATION_INTERVAL_FULL 10000 /* 10 sec */
206#define ATH5K_TUNE_CALIBRATION_INTERVAL_ANI 1000 /* 1 sec */
206 207
207#define AR5K_INIT_CARR_SENSE_EN 1 208#define AR5K_INIT_CARR_SENSE_EN 1
208 209
@@ -614,28 +615,6 @@ struct ath5k_rx_status {
614#define AR5K_BEACON_ENA 0x00800000 /*enable beacon xmit*/ 615#define AR5K_BEACON_ENA 0x00800000 /*enable beacon xmit*/
615#define AR5K_BEACON_RESET_TSF 0x01000000 /*force a TSF reset*/ 616#define AR5K_BEACON_RESET_TSF 0x01000000 /*force a TSF reset*/
616 617
617#if 0
618/**
619 * struct ath5k_beacon_state - Per-station beacon timer state.
620 * @bs_interval: in TU's, can also include the above flags
621 * @bs_cfp_max_duration: if non-zero hw is setup to coexist with a
622 * Point Coordination Function capable AP
623 */
624struct ath5k_beacon_state {
625 u32 bs_next_beacon;
626 u32 bs_next_dtim;
627 u32 bs_interval;
628 u8 bs_dtim_period;
629 u8 bs_cfp_period;
630 u16 bs_cfp_max_duration;
631 u16 bs_cfp_du_remain;
632 u16 bs_tim_offset;
633 u16 bs_sleep_duration;
634 u16 bs_bmiss_threshold;
635 u32 bs_cfp_next;
636};
637#endif
638
639 618
640/* 619/*
641 * TSF to TU conversion: 620 * TSF to TU conversion:
@@ -822,9 +801,9 @@ struct ath5k_athchan_2ghz {
822 * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold 801 * @AR5K_INT_TXURN: received when we should increase the TX trigger threshold
823 * We currently do increments on interrupt by 802 * We currently do increments on interrupt by
824 * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2 803 * (AR5K_TUNE_MAX_TX_FIFO_THRES - current_trigger_level) / 2
825 * @AR5K_INT_MIB: Indicates the Management Information Base counters should be 804 * @AR5K_INT_MIB: Indicates that either the Management Information Base
826 * checked. We should do this with ath5k_hw_update_mib_counters() but 805 * counters or one of the PHY error counters reached the maximum value
827 * it seems we should also then do some noise immunity work. 806 * and should be read and cleared.
828 * @AR5K_INT_RXPHY: RX PHY Error 807 * @AR5K_INT_RXPHY: RX PHY Error
829 * @AR5K_INT_RXKCM: RX Key cache miss 808 * @AR5K_INT_RXKCM: RX Key cache miss
830 * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a 809 * @AR5K_INT_SWBA: SoftWare Beacon Alert - indicates its time to send a
@@ -912,10 +891,11 @@ enum ath5k_int {
912 AR5K_INT_NOCARD = 0xffffffff 891 AR5K_INT_NOCARD = 0xffffffff
913}; 892};
914 893
915/* Software interrupts used for calibration */ 894/* mask which calibration is active at the moment */
916enum ath5k_software_interrupt { 895enum ath5k_calibration_mask {
917 AR5K_SWI_FULL_CALIBRATION = 0x01, 896 AR5K_CALIBRATION_FULL = 0x01,
918 AR5K_SWI_SHORT_CALIBRATION = 0x02, 897 AR5K_CALIBRATION_SHORT = 0x02,
898 AR5K_CALIBRATION_ANI = 0x04,
919}; 899};
920 900
921/* 901/*
@@ -1004,6 +984,8 @@ struct ath5k_capabilities {
1004 struct { 984 struct {
1005 u8 q_tx_num; 985 u8 q_tx_num;
1006 } cap_queues; 986 } cap_queues;
987
988 bool cap_has_phyerr_counters;
1007}; 989};
1008 990
1009/* size of noise floor history (keep it a power of two) */ 991/* size of noise floor history (keep it a power of two) */
@@ -1014,6 +996,15 @@ struct ath5k_nfcal_hist
1014 s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */ 996 s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
1015}; 997};
1016 998
999/**
1000 * struct ath5k_avg_val - Helper structure for average calculation
1001 * @avg: contains the actual average value
1002 * @avg_weight: is used internally during calculation to prevent rounding errors
1003 */
1004struct ath5k_avg_val {
1005 int avg;
1006 int avg_weight;
1007};
1017 1008
1018/***************************************\ 1009/***************************************\
1019 HARDWARE ABSTRACTION LAYER STRUCTURE 1010 HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1028,7 +1019,6 @@ struct ath5k_nfcal_hist
1028 1019
1029/* TODO: Clean up and merge with ath5k_softc */ 1020/* TODO: Clean up and merge with ath5k_softc */
1030struct ath5k_hw { 1021struct ath5k_hw {
1031 u32 ah_magic;
1032 struct ath_common common; 1022 struct ath_common common;
1033 1023
1034 struct ath5k_softc *ah_sc; 1024 struct ath5k_softc *ah_sc;
@@ -1036,7 +1026,6 @@ struct ath5k_hw {
1036 1026
1037 enum ath5k_int ah_imr; 1027 enum ath5k_int ah_imr;
1038 1028
1039 enum nl80211_iftype ah_op_mode;
1040 struct ieee80211_channel *ah_current_channel; 1029 struct ieee80211_channel *ah_current_channel;
1041 bool ah_turbo; 1030 bool ah_turbo;
1042 bool ah_calibration; 1031 bool ah_calibration;
@@ -1049,7 +1038,6 @@ struct ath5k_hw {
1049 u32 ah_phy; 1038 u32 ah_phy;
1050 u32 ah_mac_srev; 1039 u32 ah_mac_srev;
1051 u16 ah_mac_version; 1040 u16 ah_mac_version;
1052 u16 ah_mac_revision;
1053 u16 ah_phy_revision; 1041 u16 ah_phy_revision;
1054 u16 ah_radio_5ghz_revision; 1042 u16 ah_radio_5ghz_revision;
1055 u16 ah_radio_2ghz_revision; 1043 u16 ah_radio_2ghz_revision;
@@ -1071,8 +1059,6 @@ struct ath5k_hw {
1071 u8 ah_def_ant; 1059 u8 ah_def_ant;
1072 bool ah_software_retry; 1060 bool ah_software_retry;
1073 1061
1074 int ah_gpio_npins;
1075
1076 struct ath5k_capabilities ah_capabilities; 1062 struct ath5k_capabilities ah_capabilities;
1077 1063
1078 struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES]; 1064 struct ath5k_txq_info ah_txq[AR5K_NUM_TX_QUEUES];
@@ -1123,17 +1109,18 @@ struct ath5k_hw {
1123 1109
1124 struct ath5k_nfcal_hist ah_nfcal_hist; 1110 struct ath5k_nfcal_hist ah_nfcal_hist;
1125 1111
1112 /* average beacon RSSI in our BSS (used by ANI) */
1113 struct ath5k_avg_val ah_beacon_rssi_avg;
1114
1126 /* noise floor from last periodic calibration */ 1115 /* noise floor from last periodic calibration */
1127 s32 ah_noise_floor; 1116 s32 ah_noise_floor;
1128 1117
1129 /* Calibration timestamp */ 1118 /* Calibration timestamp */
1130 unsigned long ah_cal_tstamp; 1119 unsigned long ah_cal_next_full;
1131 1120 unsigned long ah_cal_next_ani;
1132 /* Calibration interval (secs) */
1133 u8 ah_cal_intval;
1134 1121
1135 /* Software interrupt mask */ 1122 /* Calibration mask */
1136 u8 ah_swi_mask; 1123 u8 ah_cal_mask;
1137 1124
1138 /* 1125 /*
1139 * Function pointers 1126 * Function pointers
@@ -1141,9 +1128,9 @@ struct ath5k_hw {
1141 int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc, 1128 int (*ah_setup_rx_desc)(struct ath5k_hw *ah, struct ath5k_desc *desc,
1142 u32 size, unsigned int flags); 1129 u32 size, unsigned int flags);
1143 int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1130 int (*ah_setup_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1144 unsigned int, unsigned int, enum ath5k_pkt_type, unsigned int, 1131 unsigned int, unsigned int, int, enum ath5k_pkt_type,
1145 unsigned int, unsigned int, unsigned int, unsigned int, 1132 unsigned int, unsigned int, unsigned int, unsigned int,
1146 unsigned int, unsigned int, unsigned int); 1133 unsigned int, unsigned int, unsigned int, unsigned int);
1147 int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *, 1134 int (*ah_setup_mrr_tx_desc)(struct ath5k_hw *, struct ath5k_desc *,
1148 unsigned int, unsigned int, unsigned int, unsigned int, 1135 unsigned int, unsigned int, unsigned int, unsigned int,
1149 unsigned int, unsigned int); 1136 unsigned int, unsigned int);
@@ -1158,158 +1145,145 @@ struct ath5k_hw {
1158 */ 1145 */
1159 1146
1160/* Attach/Detach Functions */ 1147/* Attach/Detach Functions */
1161extern int ath5k_hw_attach(struct ath5k_softc *sc); 1148int ath5k_hw_attach(struct ath5k_softc *sc);
1162extern void ath5k_hw_detach(struct ath5k_hw *ah); 1149void ath5k_hw_detach(struct ath5k_hw *ah);
1163 1150
1164/* LED functions */ 1151/* LED functions */
1165extern int ath5k_init_leds(struct ath5k_softc *sc); 1152int ath5k_init_leds(struct ath5k_softc *sc);
1166extern void ath5k_led_enable(struct ath5k_softc *sc); 1153void ath5k_led_enable(struct ath5k_softc *sc);
1167extern void ath5k_led_off(struct ath5k_softc *sc); 1154void ath5k_led_off(struct ath5k_softc *sc);
1168extern void ath5k_unregister_leds(struct ath5k_softc *sc); 1155void ath5k_unregister_leds(struct ath5k_softc *sc);
1169 1156
1170/* Reset Functions */ 1157/* Reset Functions */
1171extern int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial); 1158int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
1172extern int ath5k_hw_on_hold(struct ath5k_hw *ah); 1159int ath5k_hw_on_hold(struct ath5k_hw *ah);
1173extern int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, struct ieee80211_channel *channel, bool change_channel); 1160int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1161 struct ieee80211_channel *channel, bool change_channel);
1162int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
1163 bool is_set);
1174/* Power management functions */ 1164/* Power management functions */
1175extern int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, bool set_chip, u16 sleep_duration);
1176 1165
1177/* DMA Related Functions */ 1166/* DMA Related Functions */
1178extern void ath5k_hw_start_rx_dma(struct ath5k_hw *ah); 1167void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
1179extern int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah); 1168int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
1180extern u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah); 1169u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
1181extern void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr); 1170void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
1182extern int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue); 1171int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
1183extern int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue); 1172int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue);
1184extern u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue); 1173u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
1185extern int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, 1174int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
1186 u32 phys_addr); 1175 u32 phys_addr);
1187extern int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase); 1176int ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase);
1188/* Interrupt handling */ 1177/* Interrupt handling */
1189extern bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah); 1178bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
1190extern int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask); 1179int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
1191extern enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum 1180enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask);
1192ath5k_int new_mask); 1181void ath5k_hw_update_mib_counters(struct ath5k_hw *ah);
1193extern void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, struct ieee80211_low_level_stats *stats);
1194 1182
1195/* EEPROM access functions */ 1183/* EEPROM access functions */
1196extern int ath5k_eeprom_init(struct ath5k_hw *ah); 1184int ath5k_eeprom_init(struct ath5k_hw *ah);
1197extern void ath5k_eeprom_detach(struct ath5k_hw *ah); 1185void ath5k_eeprom_detach(struct ath5k_hw *ah);
1198extern int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac); 1186int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
1199extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
1200 1187
1201/* Protocol Control Unit Functions */ 1188/* Protocol Control Unit Functions */
1202extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); 1189extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
1203extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class); 1190void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
1204/* BSSID Functions */ 1191/* BSSID Functions */
1205extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); 1192int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
1206extern void ath5k_hw_set_associd(struct ath5k_hw *ah); 1193void ath5k_hw_set_associd(struct ath5k_hw *ah);
1207extern void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask); 1194void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
1208/* Receive start/stop functions */ 1195/* Receive start/stop functions */
1209extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah); 1196void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
1210extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah); 1197void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
1211/* RX Filter functions */ 1198/* RX Filter functions */
1212extern void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1); 1199void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
1213extern int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index); 1200u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
1214extern int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index); 1201void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
1215extern u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
1216extern void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
1217/* Beacon control functions */ 1202/* Beacon control functions */
1218extern u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah); 1203u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
1219extern u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah); 1204void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
1220extern void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64); 1205void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
1221extern void ath5k_hw_reset_tsf(struct ath5k_hw *ah); 1206void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
1222extern void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
1223#if 0
1224extern int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah, const struct ath5k_beacon_state *state);
1225extern void ath5k_hw_reset_beacon(struct ath5k_hw *ah);
1226extern int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr);
1227#endif
1228/* ACK bit rate */ 1207/* ACK bit rate */
1229void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high); 1208void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high);
1230/* ACK/CTS Timeouts */
1231extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
1232extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
1233extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
1234extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
1235/* Clock rate related functions */ 1209/* Clock rate related functions */
1236unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec); 1210unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
1237unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock); 1211unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
1238unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah); 1212unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
1239/* Key table (WEP) functions */ 1213/* Key table (WEP) functions */
1240extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry); 1214int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
1241extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry); 1215int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry,
1242extern int ath5k_hw_set_key(struct ath5k_hw *ah, u16 entry, const struct ieee80211_key_conf *key, const u8 *mac); 1216 const struct ieee80211_key_conf *key, const u8 *mac);
1243extern int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac); 1217int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac);
1244 1218
1245/* Queue Control Unit, DFS Control Unit Functions */ 1219/* Queue Control Unit, DFS Control Unit Functions */
1246extern int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, struct ath5k_txq_info *queue_info); 1220int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
1247extern int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, 1221 struct ath5k_txq_info *queue_info);
1248 const struct ath5k_txq_info *queue_info); 1222int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
1249extern int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, 1223 const struct ath5k_txq_info *queue_info);
1250 enum ath5k_tx_queue queue_type, 1224int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
1251 struct ath5k_txq_info *queue_info); 1225 enum ath5k_tx_queue queue_type,
1252extern u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue); 1226 struct ath5k_txq_info *queue_info);
1253extern void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1227u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
1254extern int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1228void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1255extern unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah); 1229int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1256extern int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time); 1230int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time);
1257 1231
1258/* Hardware Descriptor Functions */ 1232/* Hardware Descriptor Functions */
1259extern int ath5k_hw_init_desc_functions(struct ath5k_hw *ah); 1233int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
1260 1234
1261/* GPIO Functions */ 1235/* GPIO Functions */
1262extern void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state); 1236void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
1263extern int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio); 1237int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
1264extern int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio); 1238int ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio);
1265extern u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio); 1239u32 ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio);
1266extern int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val); 1240int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
1267extern void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, u32 interrupt_level); 1241void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
1242 u32 interrupt_level);
1268 1243
1269/* rfkill Functions */ 1244/* rfkill Functions */
1270extern void ath5k_rfkill_hw_start(struct ath5k_hw *ah); 1245void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
1271extern void ath5k_rfkill_hw_stop(struct ath5k_hw *ah); 1246void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
1272 1247
1273/* Misc functions */ 1248/* Misc functions */
1274int ath5k_hw_set_capabilities(struct ath5k_hw *ah); 1249int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
1275extern int ath5k_hw_get_capability(struct ath5k_hw *ah, enum ath5k_capability_type cap_type, u32 capability, u32 *result); 1250int ath5k_hw_get_capability(struct ath5k_hw *ah,
1276extern int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id); 1251 enum ath5k_capability_type cap_type, u32 capability,
1277extern int ath5k_hw_disable_pspoll(struct ath5k_hw *ah); 1252 u32 *result);
1253int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
1254int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
1278 1255
1279/* Initial register settings functions */ 1256/* Initial register settings functions */
1280extern int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel); 1257int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
1281 1258
1282/* Initialize RF */ 1259/* Initialize RF */
1283extern int ath5k_hw_rfregs_init(struct ath5k_hw *ah, 1260int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
1284 struct ieee80211_channel *channel, 1261 struct ieee80211_channel *channel,
1285 unsigned int mode); 1262 unsigned int mode);
1286extern int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq); 1263int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq);
1287extern enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah); 1264enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
1288extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah); 1265int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
1289/* PHY/RF channel functions */ 1266/* PHY/RF channel functions */
1290extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags); 1267bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
1291extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1268int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1292/* PHY calibration */ 1269/* PHY calibration */
1293void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah); 1270void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
1294extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1271int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
1295extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq); 1272 struct ieee80211_channel *channel);
1296extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
1297extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
1298/* Spur mitigation */ 1273/* Spur mitigation */
1299bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, 1274bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
1300 struct ieee80211_channel *channel); 1275 struct ieee80211_channel *channel);
1301void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, 1276void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1302 struct ieee80211_channel *channel); 1277 struct ieee80211_channel *channel);
1303/* Misc PHY functions */ 1278/* Misc PHY functions */
1304extern u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan); 1279u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
1305extern int ath5k_hw_phy_disable(struct ath5k_hw *ah); 1280int ath5k_hw_phy_disable(struct ath5k_hw *ah);
1306/* Antenna control */ 1281/* Antenna control */
1307extern void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode); 1282void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
1308extern void ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant);
1309extern unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah);
1310/* TX power setup */ 1283/* TX power setup */
1311extern int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, u8 ee_mode, u8 txpower); 1284int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
1312extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower); 1285 u8 ee_mode, u8 txpower);
1286int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
1313 1287
1314/* 1288/*
1315 * Functions used internally 1289 * Functions used internally
@@ -1335,29 +1309,6 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
1335 iowrite32(val, ah->ah_iobase + reg); 1309 iowrite32(val, ah->ah_iobase + reg);
1336} 1310}
1337 1311
1338#if defined(_ATH5K_RESET) || defined(_ATH5K_PHY)
1339/*
1340 * Check if a register write has been completed
1341 */
1342static int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag,
1343 u32 val, bool is_set)
1344{
1345 int i;
1346 u32 data;
1347
1348 for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
1349 data = ath5k_hw_reg_read(ah, reg);
1350 if (is_set && (data & flag))
1351 break;
1352 else if ((data & flag) == val)
1353 break;
1354 udelay(15);
1355 }
1356
1357 return (i <= 0) ? -EAGAIN : 0;
1358}
1359#endif
1360
1361static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits) 1312static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
1362{ 1313{
1363 u32 retval = 0, bit, i; 1314 u32 retval = 0, bit, i;
@@ -1370,9 +1321,27 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
1370 return retval; 1321 return retval;
1371} 1322}
1372 1323
1373static inline int ath5k_pad_size(int hdrlen) 1324#define AVG_SAMPLES 8
1325#define AVG_FACTOR 1000
1326
1327/**
1328 * ath5k_moving_average - Exponentially weighted moving average
1329 * @avg: average structure
1330 * @val: current value
1331 *
1332 * This implementation makes use of a struct ath5k_avg_val to prevent rounding
1333 * errors.
1334 */
1335static inline struct ath5k_avg_val
1336ath5k_moving_average(const struct ath5k_avg_val avg, const int val)
1374{ 1337{
1375 return (hdrlen < 24) ? 0 : hdrlen & 3; 1338 struct ath5k_avg_val new;
1339 new.avg_weight = avg.avg_weight ?
1340 (((avg.avg_weight * ((AVG_SAMPLES) - 1)) +
1341 (val * (AVG_FACTOR))) / (AVG_SAMPLES)) :
1342 (val * (AVG_FACTOR));
1343 new.avg = new.avg_weight / (AVG_FACTOR);
1344 return new;
1376} 1345}
1377 1346
1378#endif 1347#endif
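
Worked example of ath5k_moving_average() with AVG_SAMPLES = 8 and AVG_FACTOR = 1000, feeding RSSI values 40 and then 8 into an empty average:

/* step 1: avg_weight = 40 * 1000                  = 40000, avg = 40
 * step 2: avg_weight = (40000 * 7 + 8 * 1000) / 8 = 36000, avg = 36
 *
 * Keeping the intermediate value scaled by AVG_FACTOR preserves the
 * fractional part that a plain (avg * 7 + val) / 8 integer average
 * would repeatedly truncate away.
 */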
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index dc0786cc2639..e0c244b02f05 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -114,7 +114,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
114 /* 114 /*
115 * HW information 115 * HW information
116 */ 116 */
117 ah->ah_op_mode = NL80211_IFTYPE_STATION;
118 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT; 117 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
119 ah->ah_turbo = false; 118 ah->ah_turbo = false;
120 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; 119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
@@ -124,6 +123,9 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
124 ah->ah_cw_min = AR5K_TUNE_CWMIN; 123 ah->ah_cw_min = AR5K_TUNE_CWMIN;
125 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY; 124 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
126 ah->ah_software_retry = false; 125 ah->ah_software_retry = false;
126 ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
127 ah->ah_noise_floor = -95; /* until first NF calibration is run */
128 sc->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
127 129
128 /* 130 /*
129 * Find the mac version 131 * Find the mac version
@@ -149,7 +151,6 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
149 /* Get MAC, PHY and RADIO revisions */ 151 /* Get MAC, PHY and RADIO revisions */
150 ah->ah_mac_srev = srev; 152 ah->ah_mac_srev = srev;
151 ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER); 153 ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
152 ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
153 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) & 154 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
154 0xffffffff; 155 0xffffffff;
155 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah, 156 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
@@ -328,7 +329,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
328 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */ 329 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
329 memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN); 330 memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
330 ath5k_hw_set_associd(ah); 331 ath5k_hw_set_associd(ah);
331 ath5k_hw_set_opmode(ah); 332 ath5k_hw_set_opmode(ah, sc->opmode);
332 333
333 ath5k_hw_rfgain_opt_init(ah); 334 ath5k_hw_rfgain_opt_init(ah);
334 335
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 3abbe7513ab5..93005f1d326d 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -59,8 +59,8 @@
59#include "base.h" 59#include "base.h"
60#include "reg.h" 60#include "reg.h"
61#include "debug.h" 61#include "debug.h"
62#include "ani.h"
62 63
63static u8 ath5k_calinterval = 10; /* Calibrate PHY every 10 secs (TODO: Fixme) */
64static int modparam_nohwcrypt; 64static int modparam_nohwcrypt;
65module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 65module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
66MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 66MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
@@ -199,7 +199,7 @@ static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
199static int ath5k_pci_suspend(struct device *dev); 199static int ath5k_pci_suspend(struct device *dev);
200static int ath5k_pci_resume(struct device *dev); 200static int ath5k_pci_resume(struct device *dev);
201 201
202SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume); 202static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
203#define ATH5K_PM_OPS (&ath5k_pm_ops) 203#define ATH5K_PM_OPS (&ath5k_pm_ops)
204#else 204#else
205#define ATH5K_PM_OPS NULL 205#define ATH5K_PM_OPS NULL
@@ -231,7 +231,7 @@ static void ath5k_remove_interface(struct ieee80211_hw *hw,
231 struct ieee80211_vif *vif); 231 struct ieee80211_vif *vif);
232static int ath5k_config(struct ieee80211_hw *hw, u32 changed); 232static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
233static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, 233static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
234 int mc_count, struct dev_addr_list *mc_list); 234 struct netdev_hw_addr_list *mc_list);
235static void ath5k_configure_filter(struct ieee80211_hw *hw, 235static void ath5k_configure_filter(struct ieee80211_hw *hw,
236 unsigned int changed_flags, 236 unsigned int changed_flags,
237 unsigned int *new_flags, 237 unsigned int *new_flags,
@@ -308,7 +308,7 @@ static int ath5k_rxbuf_setup(struct ath5k_softc *sc,
308 struct ath5k_buf *bf); 308 struct ath5k_buf *bf);
309static int ath5k_txbuf_setup(struct ath5k_softc *sc, 309static int ath5k_txbuf_setup(struct ath5k_softc *sc,
310 struct ath5k_buf *bf, 310 struct ath5k_buf *bf,
311 struct ath5k_txq *txq); 311 struct ath5k_txq *txq, int padsize);
312static inline void ath5k_txbuf_free(struct ath5k_softc *sc, 312static inline void ath5k_txbuf_free(struct ath5k_softc *sc,
313 struct ath5k_buf *bf) 313 struct ath5k_buf *bf)
314{ 314{
@@ -365,6 +365,7 @@ static void ath5k_beacon_send(struct ath5k_softc *sc);
365static void ath5k_beacon_config(struct ath5k_softc *sc); 365static void ath5k_beacon_config(struct ath5k_softc *sc);
366static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf); 366static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
367static void ath5k_tasklet_beacon(unsigned long data); 367static void ath5k_tasklet_beacon(unsigned long data);
368static void ath5k_tasklet_ani(unsigned long data);
368 369
369static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) 370static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
370{ 371{
@@ -830,6 +831,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
830 tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc); 831 tasklet_init(&sc->restq, ath5k_tasklet_reset, (unsigned long)sc);
831 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc); 832 tasklet_init(&sc->calib, ath5k_tasklet_calibrate, (unsigned long)sc);
832 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc); 833 tasklet_init(&sc->beacontq, ath5k_tasklet_beacon, (unsigned long)sc);
834 tasklet_init(&sc->ani_tasklet, ath5k_tasklet_ani, (unsigned long)sc);
833 835
834 ret = ath5k_eeprom_read_mac(ah, mac); 836 ret = ath5k_eeprom_read_mac(ah, mac);
835 if (ret) { 837 if (ret) {
@@ -1138,8 +1140,6 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1138 struct ath5k_hw *ah = sc->ah; 1140 struct ath5k_hw *ah = sc->ah;
1139 u32 rfilt; 1141 u32 rfilt;
1140 1142
1141 ah->ah_op_mode = sc->opmode;
1142
1143 /* configure rx filter */ 1143 /* configure rx filter */
1144 rfilt = sc->filter_flags; 1144 rfilt = sc->filter_flags;
1145 ath5k_hw_set_rx_filter(ah, rfilt); 1145 ath5k_hw_set_rx_filter(ah, rfilt);
@@ -1148,8 +1148,9 @@ ath5k_mode_setup(struct ath5k_softc *sc)
1148 ath5k_hw_set_bssid_mask(ah, sc->bssidmask); 1148 ath5k_hw_set_bssid_mask(ah, sc->bssidmask);
1149 1149
1150 /* configure operational mode */ 1150 /* configure operational mode */
1151 ath5k_hw_set_opmode(ah); 1151 ath5k_hw_set_opmode(ah, sc->opmode);
1152 1152
1153 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d\n", sc->opmode);
1153 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt); 1154 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
1154} 1155}
1155 1156
@@ -1272,7 +1273,7 @@ static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1272 1273
1273static int 1274static int
1274ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf, 1275ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1275 struct ath5k_txq *txq) 1276 struct ath5k_txq *txq, int padsize)
1276{ 1277{
1277 struct ath5k_hw *ah = sc->ah; 1278 struct ath5k_hw *ah = sc->ah;
1278 struct ath5k_desc *ds = bf->desc; 1279 struct ath5k_desc *ds = bf->desc;
@@ -1324,7 +1325,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
1324 sc->vif, pktlen, info)); 1325 sc->vif, pktlen, info));
1325 } 1326 }
1326 ret = ah->ah_setup_tx_desc(ah, ds, pktlen, 1327 ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
1327 ieee80211_get_hdrlen_from_skb(skb), 1328 ieee80211_get_hdrlen_from_skb(skb), padsize,
1328 get_hw_packet_type(skb), 1329 get_hw_packet_type(skb),
1329 (sc->power_level * 2), 1330 (sc->power_level * 2),
1330 hw_rate, 1331 hw_rate,
@@ -1636,7 +1637,6 @@ ath5k_txq_cleanup(struct ath5k_softc *sc)
1636 sc->txqs[i].link); 1637 sc->txqs[i].link);
1637 } 1638 }
1638 } 1639 }
1639 ieee80211_wake_queues(sc->hw); /* XXX move to callers */
1640 1640
1641 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) 1641 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1642 if (sc->txqs[i].setup) 1642 if (sc->txqs[i].setup)
@@ -1807,6 +1807,86 @@ ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
1807} 1807}
1808 1808
1809static void 1809static void
1810ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
1811{
1812 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1813 struct ath5k_hw *ah = sc->ah;
1814 struct ath_common *common = ath5k_hw_common(ah);
1815
1816 /* only beacons from our BSSID */
1817 if (!ieee80211_is_beacon(mgmt->frame_control) ||
1818 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
1819 return;
1820
1821 ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg,
1822 rssi);
1823
1824 /* in IBSS mode we should keep RSSI statistics per neighbour */
1825 /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
1826}
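
The averaged value ends up in ah->ah_beacon_rssi_avg.avg, which the ANI algorithm can compare against the ATH5K_ANI_RSSI_THR_HIGH/LOW thresholds from ani.h; the exact policy lives in the raise/lower immunity helpers, which are outside this hunk. A hypothetical illustration:

/* Sketch only: how the averaged beacon RSSI could be tested against
 * the ani.h thresholds. */
static inline bool example_beacon_rssi_is_high(struct ath5k_hw *ah)
{
	return ah->ah_beacon_rssi_avg.avg > ATH5K_ANI_RSSI_THR_HIGH;
}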
1827
1828/*
1829 * Compute padding position. skb must contain an IEEE 802.11 frame
1830 */
1831static int ath5k_common_padpos(struct sk_buff *skb)
1832{
1833	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1834 __le16 frame_control = hdr->frame_control;
1835 int padpos = 24;
1836
1837 if (ieee80211_has_a4(frame_control)) {
1838 padpos += ETH_ALEN;
1839 }
1840 if (ieee80211_is_data_qos(frame_control)) {
1841 padpos += IEEE80211_QOS_CTL_LEN;
1842 }
1843
1844 return padpos;
1845}
1846
1847/*
1848 * This function expects an 802.11 frame and returns the number of
1849 * bytes added, or -1 if we don't have enough header room.
1850 */
1851
1852static int ath5k_add_padding(struct sk_buff *skb)
1853{
1854 int padpos = ath5k_common_padpos(skb);
1855 int padsize = padpos & 3;
1856
1857	if (padsize && skb->len > padpos) {
1858
1859 if (skb_headroom(skb) < padsize)
1860 return -1;
1861
1862 skb_push(skb, padsize);
1863		memmove(skb->data, skb->data + padsize, padpos);
1864 return padsize;
1865 }
1866
1867 return 0;
1868}
1869
1870/*
1871 * This function expects an 802.11 frame and returns the number of
1872 * bytes removed.
1873 */
1874
1875static int ath5k_remove_padding(struct sk_buff *skb)
1876{
1877 int padpos = ath5k_common_padpos(skb);
1878 int padsize = padpos & 3;
1879
1880	if (padsize && skb->len >= padpos + padsize) {
1881 memmove(skb->data + padsize, skb->data, padpos);
1882 skb_pull(skb, padsize);
1883 return padsize;
1884 }
1885
1886 return 0;
1887}
1888
1889static void
1810ath5k_tasklet_rx(unsigned long data) 1890ath5k_tasklet_rx(unsigned long data)
1811{ 1891{
1812 struct ieee80211_rx_status *rxs; 1892 struct ieee80211_rx_status *rxs;
@@ -1819,8 +1899,6 @@ ath5k_tasklet_rx(unsigned long data)
1819 struct ath5k_buf *bf; 1899 struct ath5k_buf *bf;
1820 struct ath5k_desc *ds; 1900 struct ath5k_desc *ds;
1821 int ret; 1901 int ret;
1822 int hdrlen;
1823 int padsize;
1824 int rx_flag; 1902 int rx_flag;
1825 1903
1826 spin_lock(&sc->rxbuflock); 1904 spin_lock(&sc->rxbuflock);
@@ -1845,18 +1923,30 @@ ath5k_tasklet_rx(unsigned long data)
1845 break; 1923 break;
1846 else if (unlikely(ret)) { 1924 else if (unlikely(ret)) {
1847 ATH5K_ERR(sc, "error in processing rx descriptor\n"); 1925 ATH5K_ERR(sc, "error in processing rx descriptor\n");
1926 sc->stats.rxerr_proc++;
1848 spin_unlock(&sc->rxbuflock); 1927 spin_unlock(&sc->rxbuflock);
1849 return; 1928 return;
1850 } 1929 }
1851 1930
1931 sc->stats.rx_all_count++;
1932
1852 if (unlikely(rs.rs_more)) { 1933 if (unlikely(rs.rs_more)) {
1853 ATH5K_WARN(sc, "unsupported jumbo\n"); 1934 ATH5K_WARN(sc, "unsupported jumbo\n");
1935 sc->stats.rxerr_jumbo++;
1854 goto next; 1936 goto next;
1855 } 1937 }
1856 1938
1857 if (unlikely(rs.rs_status)) { 1939 if (unlikely(rs.rs_status)) {
1858 if (rs.rs_status & AR5K_RXERR_PHY) 1940 if (rs.rs_status & AR5K_RXERR_CRC)
1941 sc->stats.rxerr_crc++;
1942 if (rs.rs_status & AR5K_RXERR_FIFO)
1943 sc->stats.rxerr_fifo++;
1944 if (rs.rs_status & AR5K_RXERR_PHY) {
1945 sc->stats.rxerr_phy++;
1946 if (rs.rs_phyerr > 0 && rs.rs_phyerr < 32)
1947 sc->stats.rxerr_phy_code[rs.rs_phyerr]++;
1859 goto next; 1948 goto next;
1949 }
1860 if (rs.rs_status & AR5K_RXERR_DECRYPT) { 1950 if (rs.rs_status & AR5K_RXERR_DECRYPT) {
1861 /* 1951 /*
1862 * Decrypt error. If the error occurred 1952 * Decrypt error. If the error occurred
@@ -1868,12 +1958,14 @@ ath5k_tasklet_rx(unsigned long data)
1868 * 1958 *
1869 * XXX do key cache faulting 1959 * XXX do key cache faulting
1870 */ 1960 */
1961 sc->stats.rxerr_decrypt++;
1871 if (rs.rs_keyix == AR5K_RXKEYIX_INVALID && 1962 if (rs.rs_keyix == AR5K_RXKEYIX_INVALID &&
1872 !(rs.rs_status & AR5K_RXERR_CRC)) 1963 !(rs.rs_status & AR5K_RXERR_CRC))
1873 goto accept; 1964 goto accept;
1874 } 1965 }
1875 if (rs.rs_status & AR5K_RXERR_MIC) { 1966 if (rs.rs_status & AR5K_RXERR_MIC) {
1876 rx_flag |= RX_FLAG_MMIC_ERROR; 1967 rx_flag |= RX_FLAG_MMIC_ERROR;
1968 sc->stats.rxerr_mic++;
1877 goto accept; 1969 goto accept;
1878 } 1970 }
1879 1971
@@ -1905,12 +1997,8 @@ accept:
1905 * bytes and we can optimize this a bit. In addition, we must 1997 * bytes and we can optimize this a bit. In addition, we must
1906 * not try to remove padding from short control frames that do 1998 * not try to remove padding from short control frames that do
1907 * not have payload. */ 1999 * not have payload. */
1908 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 2000 ath5k_remove_padding(skb);
1909 padsize = ath5k_pad_size(hdrlen); 2001
1910 if (padsize) {
1911 memmove(skb->data + padsize, skb->data, hdrlen);
1912 skb_pull(skb, padsize);
1913 }
1914 rxs = IEEE80211_SKB_RXCB(skb); 2002 rxs = IEEE80211_SKB_RXCB(skb);
1915 2003
1916 /* 2004 /*
@@ -1943,6 +2031,12 @@ accept:
1943 rxs->signal = rxs->noise + rs.rs_rssi; 2031 rxs->signal = rxs->noise + rs.rs_rssi;
1944 2032
1945 rxs->antenna = rs.rs_antenna; 2033 rxs->antenna = rs.rs_antenna;
2034
2035 if (rs.rs_antenna > 0 && rs.rs_antenna < 5)
2036 sc->stats.antenna_rx[rs.rs_antenna]++;
2037 else
2038 sc->stats.antenna_rx[0]++; /* invalid */
2039
1946 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate); 2040 rxs->rate_idx = ath5k_hw_to_driver_rix(sc, rs.rs_rate);
1947 rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs); 2041 rxs->flag |= ath5k_rx_decrypted(sc, ds, skb, &rs);
1948 2042
@@ -1952,6 +2046,8 @@ accept:
1952 2046
1953 ath5k_debug_dump_skb(sc, skb, "RX ", 0); 2047 ath5k_debug_dump_skb(sc, skb, "RX ", 0);
1954 2048
2049 ath5k_update_beacon_rssi(sc, skb, rs.rs_rssi);
2050
1955 /* check beacons in IBSS mode */ 2051 /* check beacons in IBSS mode */
1956 if (sc->opmode == NL80211_IFTYPE_ADHOC) 2052 if (sc->opmode == NL80211_IFTYPE_ADHOC)
1957 ath5k_check_ibss_tsf(sc, skb, rxs); 2053 ath5k_check_ibss_tsf(sc, skb, rxs);
@@ -1988,6 +2084,17 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1988 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 2084 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1989 ds = bf->desc; 2085 ds = bf->desc;
1990 2086
2087 /*
2088 * It's possible that the hardware can say the buffer is
2089 * completed when it hasn't yet loaded the ds_link from
2090 * host memory and moved on. If there are more TX
2091 * descriptors in the queue, wait for TXDP to change
2092 * before processing this one.
2093 */
2094 if (ath5k_hw_get_txdp(sc->ah, txq->qnum) == bf->daddr &&
2095 !list_is_last(&bf->list, &txq->q))
2096 break;
2097
1991 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts); 2098 ret = sc->ah->ah_proc_tx_desc(sc->ah, ds, &ts);
1992 if (unlikely(ret == -EINPROGRESS)) 2099 if (unlikely(ret == -EINPROGRESS))
1993 break; 2100 break;
@@ -1997,6 +2104,7 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1997 break; 2104 break;
1998 } 2105 }
1999 2106
2107 sc->stats.tx_all_count++;
2000 skb = bf->skb; 2108 skb = bf->skb;
2001 info = IEEE80211_SKB_CB(skb); 2109 info = IEEE80211_SKB_CB(skb);
2002 bf->skb = NULL; 2110 bf->skb = NULL;
@@ -2022,14 +2130,31 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
2022 info->status.rates[ts.ts_final_idx].count++; 2130 info->status.rates[ts.ts_final_idx].count++;
2023 2131
2024 if (unlikely(ts.ts_status)) { 2132 if (unlikely(ts.ts_status)) {
2025 sc->ll_stats.dot11ACKFailureCount++; 2133 sc->stats.ack_fail++;
2026 if (ts.ts_status & AR5K_TXERR_FILT) 2134 if (ts.ts_status & AR5K_TXERR_FILT) {
2027 info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 2135 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
2136 sc->stats.txerr_filt++;
2137 }
2138 if (ts.ts_status & AR5K_TXERR_XRETRY)
2139 sc->stats.txerr_retry++;
2140 if (ts.ts_status & AR5K_TXERR_FIFO)
2141 sc->stats.txerr_fifo++;
2028 } else { 2142 } else {
2029 info->flags |= IEEE80211_TX_STAT_ACK; 2143 info->flags |= IEEE80211_TX_STAT_ACK;
2030 info->status.ack_signal = ts.ts_rssi; 2144 info->status.ack_signal = ts.ts_rssi;
2031 } 2145 }
2032 2146
2147 /*
2148 * Remove MAC header padding before giving the frame
2149 * back to mac80211.
2150 */
2151 ath5k_remove_padding(skb);
2152
2153 if (ts.ts_antenna > 0 && ts.ts_antenna < 5)
2154 sc->stats.antenna_tx[ts.ts_antenna]++;
2155 else
2156 sc->stats.antenna_tx[0]++; /* invalid */
2157
2033 ieee80211_tx_status(sc->hw, skb); 2158 ieee80211_tx_status(sc->hw, skb);
2034 2159
2035 spin_lock(&sc->txbuflock); 2160 spin_lock(&sc->txbuflock);
@@ -2073,6 +2198,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2073 int ret = 0; 2198 int ret = 0;
2074 u8 antenna; 2199 u8 antenna;
2075 u32 flags; 2200 u32 flags;
2201 const int padsize = 0;
2076 2202
2077 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, 2203 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len,
2078 PCI_DMA_TODEVICE); 2204 PCI_DMA_TODEVICE);
@@ -2120,7 +2246,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
2120 * from tx power (value is in dB units already) */ 2246 * from tx power (value is in dB units already) */
2121 ds->ds_data = bf->skbaddr; 2247 ds->ds_data = bf->skbaddr;
2122 ret = ah->ah_setup_tx_desc(ah, ds, skb->len, 2248 ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
2123 ieee80211_get_hdrlen_from_skb(skb), 2249 ieee80211_get_hdrlen_from_skb(skb), padsize,
2124 AR5K_PKT_TYPE_BEACON, (sc->power_level * 2), 2250 AR5K_PKT_TYPE_BEACON, (sc->power_level * 2),
2125 ieee80211_get_tx_rate(sc->hw, info)->hw_value, 2251 ieee80211_get_tx_rate(sc->hw, info)->hw_value,
2126 1, AR5K_TXKEYIX_INVALID, 2252 1, AR5K_TXKEYIX_INVALID,
@@ -2407,9 +2533,6 @@ ath5k_init(struct ath5k_softc *sc)
2407 */ 2533 */
2408 ath5k_stop_locked(sc); 2534 ath5k_stop_locked(sc);
2409 2535
2410 /* Set PHY calibration interval */
2411 ah->ah_cal_intval = ath5k_calinterval;
2412
2413 /* 2536 /*
2414 * The basic interface to setting the hardware in a good 2537 * The basic interface to setting the hardware in a good
2415 * state is ``reset''. On return the hardware is known to 2538 * state is ``reset''. On return the hardware is known to
@@ -2421,7 +2544,8 @@ ath5k_init(struct ath5k_softc *sc)
2421 sc->curband = &sc->sbands[sc->curchan->band]; 2544 sc->curband = &sc->sbands[sc->curchan->band];
2422 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL | 2545 sc->imask = AR5K_INT_RXOK | AR5K_INT_RXERR | AR5K_INT_RXEOL |
2423 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | 2546 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2424 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_SWI; 2547 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2548
2425 ret = ath5k_reset(sc, NULL); 2549 ret = ath5k_reset(sc, NULL);
2426 if (ret) 2550 if (ret)
2427 goto done; 2551 goto done;
@@ -2435,8 +2559,7 @@ ath5k_init(struct ath5k_softc *sc)
2435 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++) 2559 for (i = 0; i < AR5K_KEYTABLE_SIZE; i++)
2436 ath5k_hw_reset_key(ah, i); 2560 ath5k_hw_reset_key(ah, i);
2437 2561
2438 /* Set ack to be sent at low bit-rates */ 2562 ath5k_hw_set_ack_bitrate_high(ah, true);
2439 ath5k_hw_set_ack_bitrate_high(ah, false);
2440 ret = 0; 2563 ret = 0;
2441done: 2564done:
2442 mmiowb(); 2565 mmiowb();
@@ -2533,12 +2656,33 @@ ath5k_stop_hw(struct ath5k_softc *sc)
2533 tasklet_kill(&sc->restq); 2656 tasklet_kill(&sc->restq);
2534 tasklet_kill(&sc->calib); 2657 tasklet_kill(&sc->calib);
2535 tasklet_kill(&sc->beacontq); 2658 tasklet_kill(&sc->beacontq);
2659 tasklet_kill(&sc->ani_tasklet);
2536 2660
2537 ath5k_rfkill_hw_stop(sc->ah); 2661 ath5k_rfkill_hw_stop(sc->ah);
2538 2662
2539 return ret; 2663 return ret;
2540} 2664}
2541 2665
2666static void
2667ath5k_intr_calibration_poll(struct ath5k_hw *ah)
2668{
2669 if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
2670 !(ah->ah_cal_mask & AR5K_CALIBRATION_FULL)) {
2671 /* run ANI only when full calibration is not active */
2672 ah->ah_cal_next_ani = jiffies +
2673 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
2674 tasklet_schedule(&ah->ah_sc->ani_tasklet);
2675
2676 } else if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
2677 ah->ah_cal_next_full = jiffies +
2678 msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
2679 tasklet_schedule(&ah->ah_sc->calib);
2680 }
2681 /* we could use SWI to generate enough interrupts to meet our
2682 * calibration interval requirements, if necessary:
2683 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
2684}
2685
2542static irqreturn_t 2686static irqreturn_t
2543ath5k_intr(int irq, void *dev_id) 2687ath5k_intr(int irq, void *dev_id)
2544{ 2688{
@@ -2562,7 +2706,20 @@ ath5k_intr(int irq, void *dev_id)
2562 */ 2706 */
2563 tasklet_schedule(&sc->restq); 2707 tasklet_schedule(&sc->restq);
2564 } else if (unlikely(status & AR5K_INT_RXORN)) { 2708 } else if (unlikely(status & AR5K_INT_RXORN)) {
2565 tasklet_schedule(&sc->restq); 2709 /*
2710 * Receive buffers are full. Either the bus is busy or
2711 * the CPU is not fast enough to process all received
2712 * frames.
2713 * Older chipsets need a reset to come out of this
2714 * condition, but we treat it as RX for newer chips.
2715 * We don't know exactly which versions need a reset -
2716 * this guess is copied from the HAL.
2717 */
2718 sc->stats.rxorn_intr++;
2719 if (ah->ah_mac_srev < AR5K_SREV_AR5212)
2720 tasklet_schedule(&sc->restq);
2721 else
2722 tasklet_schedule(&sc->rxtq);
2566 } else { 2723 } else {
2567 if (status & AR5K_INT_SWBA) { 2724 if (status & AR5K_INT_SWBA) {
2568 tasklet_hi_schedule(&sc->beacontq); 2725 tasklet_hi_schedule(&sc->beacontq);
@@ -2587,15 +2744,10 @@ ath5k_intr(int irq, void *dev_id)
2587 if (status & AR5K_INT_BMISS) { 2744 if (status & AR5K_INT_BMISS) {
2588 /* TODO */ 2745 /* TODO */
2589 } 2746 }
2590 if (status & AR5K_INT_SWI) {
2591 tasklet_schedule(&sc->calib);
2592 }
2593 if (status & AR5K_INT_MIB) { 2747 if (status & AR5K_INT_MIB) {
2594 /* 2748 sc->stats.mib_intr++;
2595 * These stats are also used for ANI i think 2749 ath5k_hw_update_mib_counters(ah);
2596 * so how about updating them more often ? 2750 ath5k_ani_mib_intr(ah);
2597 */
2598 ath5k_hw_update_mib_counters(ah, &sc->ll_stats);
2599 } 2751 }
2600 if (status & AR5K_INT_GPIO) 2752 if (status & AR5K_INT_GPIO)
2601 tasklet_schedule(&sc->rf_kill.toggleq); 2753 tasklet_schedule(&sc->rf_kill.toggleq);
@@ -2606,7 +2758,7 @@ ath5k_intr(int irq, void *dev_id)
2606 if (unlikely(!counter)) 2758 if (unlikely(!counter))
2607 ATH5K_WARN(sc, "too many interrupts, giving up for now\n"); 2759 ATH5K_WARN(sc, "too many interrupts, giving up for now\n");
2608 2760
2609 ath5k_hw_calibration_poll(ah); 2761 ath5k_intr_calibration_poll(ah);
2610 2762
2611 return IRQ_HANDLED; 2763 return IRQ_HANDLED;
2612} 2764}
@@ -2630,8 +2782,7 @@ ath5k_tasklet_calibrate(unsigned long data)
2630 struct ath5k_hw *ah = sc->ah; 2782 struct ath5k_hw *ah = sc->ah;
2631 2783
2632 /* Only full calibration for now */ 2784 /* Only full calibration for now */
2633 if (ah->ah_swi_mask != AR5K_SWI_FULL_CALIBRATION) 2785 ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
2634 return;
2635 2786
2636 /* Stop queues so that calibration 2787 /* Stop queues so that calibration
2637 * doesn't interfere with tx */ 2788 * doesn't interfere with tx */
@@ -2647,18 +2798,29 @@ ath5k_tasklet_calibrate(unsigned long data)
2647 * to load new gain values. 2798 * to load new gain values.
2648 */ 2799 */
2649 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n"); 2800 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "calibration, resetting\n");
2650 ath5k_reset_wake(sc); 2801 ath5k_reset(sc, sc->curchan);
2651 } 2802 }
2652 if (ath5k_hw_phy_calibrate(ah, sc->curchan)) 2803 if (ath5k_hw_phy_calibrate(ah, sc->curchan))
2653 ATH5K_ERR(sc, "calibration of channel %u failed\n", 2804 ATH5K_ERR(sc, "calibration of channel %u failed\n",
2654 ieee80211_frequency_to_channel( 2805 ieee80211_frequency_to_channel(
2655 sc->curchan->center_freq)); 2806 sc->curchan->center_freq));
2656 2807
2657 ah->ah_swi_mask = 0;
2658
2659 /* Wake queues */ 2808 /* Wake queues */
2660 ieee80211_wake_queues(sc->hw); 2809 ieee80211_wake_queues(sc->hw);
2661 2810
2811 ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
2812}
2813
2814
2815static void
2816ath5k_tasklet_ani(unsigned long data)
2817{
2818 struct ath5k_softc *sc = (void *)data;
2819 struct ath5k_hw *ah = sc->ah;
2820
2821 ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
2822 ath5k_ani_calibration(ah);
2823 ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
2662} 2824}
2663 2825
2664 2826
@@ -2680,7 +2842,6 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
2680 struct ath5k_softc *sc = hw->priv; 2842 struct ath5k_softc *sc = hw->priv;
2681 struct ath5k_buf *bf; 2843 struct ath5k_buf *bf;
2682 unsigned long flags; 2844 unsigned long flags;
2683 int hdrlen;
2684 int padsize; 2845 int padsize;
2685 2846
2686 ath5k_debug_dump_skb(sc, skb, "TX ", 1); 2847 ath5k_debug_dump_skb(sc, skb, "TX ", 1);
@@ -2692,17 +2853,11 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
2692 * the hardware expects the header padded to 4 byte boundaries 2853 * the hardware expects the header padded to 4 byte boundaries
2693 * if this is not the case we add the padding after the header 2854 * if this is not the case we add the padding after the header
2694 */ 2855 */
2695 hdrlen = ieee80211_get_hdrlen_from_skb(skb); 2856 padsize = ath5k_add_padding(skb);
2696 padsize = ath5k_pad_size(hdrlen); 2857 if (padsize < 0) {
2697 if (padsize) { 2858 ATH5K_ERR(sc, "tx hdrlen not %%4: not enough"
2698 2859 " headroom to pad");
2699 if (skb_headroom(skb) < padsize) { 2860 goto drop_packet;
2700 ATH5K_ERR(sc, "tx hdrlen not %%4: %d not enough"
2701 " headroom to pad %d\n", hdrlen, padsize);
2702 goto drop_packet;
2703 }
2704 skb_push(skb, padsize);
2705 memmove(skb->data, skb->data+padsize, hdrlen);
2706 } 2861 }
2707 2862
2708 spin_lock_irqsave(&sc->txbuflock, flags); 2863 spin_lock_irqsave(&sc->txbuflock, flags);
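
This hunk folds the old open-coded padding into ath5k_add_padding() and keeps the returned pad size so it can be subtracted again when the descriptor's frame length is computed (see the desc.c hunks further down). A self-contained sketch of the buffer manipulation the old code spelled out, under the assumption (modeled on ath5k_pad_size()) that the pad is hdrlen & 3 for headers of 24 bytes or more:

	#include <stdio.h>
	#include <string.h>

	/* assumed padding rule, modeled on ath5k_pad_size() */
	static int pad_size(int hdrlen)
	{
		return (hdrlen < 24) ? 0 : hdrlen & 3;
	}

	int main(void)
	{
		unsigned char buf[64];
		unsigned char *data = buf + 8;       /* 8 bytes of headroom */
		int hdrlen = 26;                     /* e.g. a QoS data header */
		int padsize = pad_size(hdrlen);      /* 2 pad bytes needed */

		memset(data, 'H', hdrlen);           /* fake 802.11 header */
		memset(data + hdrlen, 'P', 10);      /* fake payload */

		if (padsize) {
			data -= padsize;             /* like skb_push(skb, padsize) */
			memmove(data, data + padsize, hdrlen);
		}
		/* payload now begins at a 4-byte-aligned offset within the frame */
		printf("payload offset in frame: %d\n", hdrlen + padsize);
		return 0;
	}

The pad bytes end up between header and payload, which is why the hardware's frame length later has to be computed with the pad removed.
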
@@ -2721,7 +2876,7 @@ static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
2721 2876
2722 bf->skb = skb; 2877 bf->skb = skb;
2723 2878
2724 if (ath5k_txbuf_setup(sc, bf, txq)) { 2879 if (ath5k_txbuf_setup(sc, bf, txq, padsize)) {
2725 bf->skb = NULL; 2880 bf->skb = NULL;
2726 spin_lock_irqsave(&sc->txbuflock, flags); 2881 spin_lock_irqsave(&sc->txbuflock, flags);
2727 list_add_tail(&bf->list, &sc->txbuf); 2882 list_add_tail(&bf->list, &sc->txbuf);
@@ -2768,6 +2923,8 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
2768 goto err; 2923 goto err;
2769 } 2924 }
2770 2925
2926 ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode);
2927
2771 /* 2928 /*
2772 * Change channels and update the h/w rate map if we're switching; 2929 * Change channels and update the h/w rate map if we're switching;
2773 * e.g. 11a to 11b/g. 2930 * e.g. 11a to 11b/g.
@@ -2836,6 +2993,8 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
2836 goto end; 2993 goto end;
2837 } 2994 }
2838 2995
2996 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", sc->opmode);
2997
2839 ath5k_hw_set_lladdr(sc->ah, vif->addr); 2998 ath5k_hw_set_lladdr(sc->ah, vif->addr);
2840 ath5k_mode_setup(sc); 2999 ath5k_mode_setup(sc);
2841 3000
@@ -2906,7 +3065,7 @@ ath5k_config(struct ieee80211_hw *hw, u32 changed)
2906 * then we must allow the user to set how many tx antennas we 3065 * then we must allow the user to set how many tx antennas we
2907 * have available 3066 * have available
2908 */ 3067 */
2909 ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT); 3068 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
2910 3069
2911unlock: 3070unlock:
2912 mutex_unlock(&sc->lock); 3071 mutex_unlock(&sc->lock);
@@ -2914,22 +3073,20 @@ unlock:
2914} 3073}
2915 3074
2916static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw, 3075static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
2917 int mc_count, struct dev_addr_list *mclist) 3076 struct netdev_hw_addr_list *mc_list)
2918{ 3077{
2919 u32 mfilt[2], val; 3078 u32 mfilt[2], val;
2920 int i;
2921 u8 pos; 3079 u8 pos;
3080 struct netdev_hw_addr *ha;
2922 3081
2923 mfilt[0] = 0; 3082 mfilt[0] = 0;
2924 mfilt[1] = 1; 3083 mfilt[1] = 1;
2925 3084
2926 for (i = 0; i < mc_count; i++) { 3085 netdev_hw_addr_list_for_each(ha, mc_list) {
2927 if (!mclist)
2928 break;
2929 /* calculate XOR of eight 6-bit values */ 3086 /* calculate XOR of eight 6-bit values */
2930 val = get_unaligned_le32(mclist->dmi_addr + 0); 3087 val = get_unaligned_le32(ha->addr + 0);
2931 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 3088 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2932 val = get_unaligned_le32(mclist->dmi_addr + 3); 3089 val = get_unaligned_le32(ha->addr + 3);
2933 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; 3090 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2934 pos &= 0x3f; 3091 pos &= 0x3f;
2935 mfilt[pos / 32] |= (1 << (pos % 32)); 3092 mfilt[pos / 32] |= (1 << (pos % 32));
@@ -2937,8 +3094,7 @@ static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
2937 * but not sure, needs testing, if we do use this we'd 3094 * but not sure, needs testing, if we do use this we'd
 2938 * need to inform below to not reset the mcast */ 3095 * need to inform below to not reset the mcast */
2939 /* ath5k_hw_set_mcast_filterindex(ah, 3096 /* ath5k_hw_set_mcast_filterindex(ah,
2940 * mclist->dmi_addr[5]); */ 3097 * ha->addr[5]); */
2941 mclist = mclist->next;
2942 } 3098 }
2943 3099
2944 return ((u64)(mfilt[1]) << 32) | mfilt[0]; 3100 return ((u64)(mfilt[1]) << 32) | mfilt[0];
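
Each multicast address selects one of 64 filter bits: the 48-bit MAC is folded into a 6-bit position by XORing eight 6-bit slices, exactly as in the loop above. A self-contained sketch, where le32() stands in for get_unaligned_le32() and the address array is padded because the second read starts at byte 3:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t le32(const uint8_t *p)   /* get_unaligned_le32() analogue */
	{
		return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	int main(void)
	{
		/* padded to 8 bytes: the read at mac+3 covers bytes 3..6 */
		const uint8_t mac[8] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb, 0, 0 };
		uint32_t mfilt[2] = { 0, 0 }, val;
		uint8_t pos;

		val = le32(mac + 0);             /* fold eight 6-bit slices */
		pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
		val = le32(mac + 3);
		pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
		pos &= 0x3f;
		mfilt[pos / 32] |= 1u << (pos % 32);

		printf("hash bit %u -> MCAST_FILTER%u\n", pos, pos / 32);
		return 0;
	}

The final &= 0x3f also discards any contribution from the extra byte the second 32-bit read pulls in, so the result depends only on the 6 address bytes.
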
@@ -3124,12 +3280,14 @@ ath5k_get_stats(struct ieee80211_hw *hw,
3124 struct ieee80211_low_level_stats *stats) 3280 struct ieee80211_low_level_stats *stats)
3125{ 3281{
3126 struct ath5k_softc *sc = hw->priv; 3282 struct ath5k_softc *sc = hw->priv;
3127 struct ath5k_hw *ah = sc->ah;
3128 3283
3129 /* Force update */ 3284 /* Force update */
3130 ath5k_hw_update_mib_counters(ah, &sc->ll_stats); 3285 ath5k_hw_update_mib_counters(sc->ah);
3131 3286
3132 memcpy(stats, &sc->ll_stats, sizeof(sc->ll_stats)); 3287 stats->dot11ACKFailureCount = sc->stats.ack_fail;
3288 stats->dot11RTSFailureCount = sc->stats.rts_fail;
3289 stats->dot11RTSSuccessCount = sc->stats.rts_ok;
3290 stats->dot11FCSErrorCount = sc->stats.fcs_error;
3133 3291
3134 return 0; 3292 return 0;
3135} 3293}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 7e1a88a5abdb..56221bc7c8cd 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -50,6 +50,7 @@
50 50
51#include "ath5k.h" 51#include "ath5k.h"
52#include "debug.h" 52#include "debug.h"
53#include "ani.h"
53 54
54#include "../regd.h" 55#include "../regd.h"
55#include "../ath.h" 56#include "../ath.h"
@@ -105,6 +106,38 @@ struct ath5k_rfkill {
105 struct tasklet_struct toggleq; 106 struct tasklet_struct toggleq;
106}; 107};
107 108
109/* statistics */
110struct ath5k_statistics {
111 /* antenna use */
112 unsigned int antenna_rx[5]; /* frames count per antenna RX */
113 unsigned int antenna_tx[5]; /* frames count per antenna TX */
114
115 /* frame errors */
116 unsigned int rx_all_count; /* all RX frames, including errors */
117 unsigned int tx_all_count; /* all TX frames, including errors */
118 unsigned int rxerr_crc;
119 unsigned int rxerr_phy;
120 unsigned int rxerr_phy_code[32];
121 unsigned int rxerr_fifo;
122 unsigned int rxerr_decrypt;
123 unsigned int rxerr_mic;
124 unsigned int rxerr_proc;
125 unsigned int rxerr_jumbo;
126 unsigned int txerr_retry;
127 unsigned int txerr_fifo;
128 unsigned int txerr_filt;
129
130 /* MIB counters */
131 unsigned int ack_fail;
132 unsigned int rts_fail;
133 unsigned int rts_ok;
134 unsigned int fcs_error;
135 unsigned int beacons;
136
137 unsigned int mib_intr;
138 unsigned int rxorn_intr;
139};
140
108#if CHAN_DEBUG 141#if CHAN_DEBUG
109#define ATH_CHAN_MAX (26+26+26+200+200) 142#define ATH_CHAN_MAX (26+26+26+200+200)
110#else 143#else
@@ -117,7 +150,6 @@ struct ath5k_softc {
117 struct pci_dev *pdev; /* for dma mapping */ 150 struct pci_dev *pdev; /* for dma mapping */
118 void __iomem *iobase; /* address of the device */ 151 void __iomem *iobase; /* address of the device */
119 struct mutex lock; /* dev-level lock */ 152 struct mutex lock; /* dev-level lock */
120 struct ieee80211_low_level_stats ll_stats;
121 struct ieee80211_hw *hw; /* IEEE 802.11 common */ 153 struct ieee80211_hw *hw; /* IEEE 802.11 common */
122 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 154 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
123 struct ieee80211_channel channels[ATH_CHAN_MAX]; 155 struct ieee80211_channel channels[ATH_CHAN_MAX];
@@ -191,6 +223,11 @@ struct ath5k_softc {
191 int power_level; /* Requested tx power in dbm */ 223 int power_level; /* Requested tx power in dbm */
192 bool assoc; /* associate state */ 224 bool assoc; /* associate state */
193 bool enable_beacon; /* true if beacons are on */ 225 bool enable_beacon; /* true if beacons are on */
226
227 struct ath5k_statistics stats;
228
229 struct ath5k_ani_state ani_state;
230 struct tasklet_struct ani_tasklet; /* ANI calibration */
194}; 231};
195 232
196#define ath5k_hw_hasbssidmask(_ah) \ 233#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index 367a6c7d3cc7..74f007126f41 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -102,9 +102,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
102 } 102 }
103 } 103 }
104 104
105 /* GPIO */
106 ah->ah_gpio_npins = AR5K_NUM_GPIO;
107
108 /* Set number of supported TX queues */ 105 /* Set number of supported TX queues */
109 if (ah->ah_version == AR5K_AR5210) 106 if (ah->ah_version == AR5K_AR5210)
110 ah->ah_capabilities.cap_queues.q_tx_num = 107 ah->ah_capabilities.cap_queues.q_tx_num =
@@ -112,6 +109,12 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
112 else 109 else
113 ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES; 110 ah->ah_capabilities.cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
114 111
112 /* newer hardware has PHY error counters */
113 if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
114 ah->ah_capabilities.cap_has_phyerr_counters = true;
115 else
116 ah->ah_capabilities.cap_has_phyerr_counters = false;
117
115 return 0; 118 return 0;
116} 119}
117 120
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index 747508c15d34..6fb5c5ffa5b1 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -69,6 +69,7 @@ module_param_named(debug, ath5k_debug, uint, 0);
69 69
70#include <linux/seq_file.h> 70#include <linux/seq_file.h>
71#include "reg.h" 71#include "reg.h"
72#include "ani.h"
72 73
73static struct dentry *ath5k_global_debugfs; 74static struct dentry *ath5k_global_debugfs;
74 75
@@ -307,6 +308,7 @@ static const struct {
307 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, 308 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
308 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, 309 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
309 { ATH5K_DEBUG_TRACE, "trace", "trace function calls" }, 310 { ATH5K_DEBUG_TRACE, "trace", "trace function calls" },
311 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
310 { ATH5K_DEBUG_ANY, "all", "show all debug levels" }, 312 { ATH5K_DEBUG_ANY, "all", "show all debug levels" },
311}; 313};
312 314
@@ -364,6 +366,369 @@ static const struct file_operations fops_debug = {
364}; 366};
365 367
366 368
369/* debugfs: antenna */
370
371static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
372 size_t count, loff_t *ppos)
373{
374 struct ath5k_softc *sc = file->private_data;
375 char buf[700];
376 unsigned int len = 0;
377 unsigned int i;
378 unsigned int v;
379
380 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
381 sc->ah->ah_ant_mode);
382 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
383 sc->ah->ah_def_ant);
384 len += snprintf(buf+len, sizeof(buf)-len, "tx antenna\t%d\n",
385 sc->ah->ah_tx_ant);
386
387 len += snprintf(buf+len, sizeof(buf)-len, "\nANTENNA\t\tRX\tTX\n");
388 for (i = 1; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
389 len += snprintf(buf+len, sizeof(buf)-len,
390 "[antenna %d]\t%d\t%d\n",
391 i, sc->stats.antenna_rx[i], sc->stats.antenna_tx[i]);
392 }
393 len += snprintf(buf+len, sizeof(buf)-len, "[invalid]\t%d\t%d\n",
394 sc->stats.antenna_rx[0], sc->stats.antenna_tx[0]);
395
396 v = ath5k_hw_reg_read(sc->ah, AR5K_DEFAULT_ANTENNA);
397 len += snprintf(buf+len, sizeof(buf)-len,
398 "\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
399
400 v = ath5k_hw_reg_read(sc->ah, AR5K_STA_ID1);
401 len += snprintf(buf+len, sizeof(buf)-len,
402 "AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
403 (v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
404 len += snprintf(buf+len, sizeof(buf)-len,
405 "AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
406 (v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
407 len += snprintf(buf+len, sizeof(buf)-len,
408 "AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
409 (v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
410 len += snprintf(buf+len, sizeof(buf)-len,
411 "AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
412 (v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
413
414 v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_AGCCTL);
415 len += snprintf(buf+len, sizeof(buf)-len,
416 "\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
417 (v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
418
419 v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_RESTART);
420 len += snprintf(buf+len, sizeof(buf)-len,
421 "AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
422 (v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
423
424 v = ath5k_hw_reg_read(sc->ah, AR5K_PHY_FAST_ANT_DIV);
425 len += snprintf(buf+len, sizeof(buf)-len,
426 "AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
427 (v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
428
429 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
430}
431
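
All of the debugfs read handlers in this patch build their output with the same append idiom, a running length plus snprintf() into a fixed buffer that is then handed to simple_read_from_buffer(). A user-space sketch of the idiom; note it relies on the buffer being sized generously, since snprintf() returns the would-be length on truncation:

	#include <stdio.h>

	int main(void)
	{
		char buf[128];
		unsigned int len = 0;

		len += snprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n", 0);
		len += snprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n", 1);
		fwrite(buf, 1, len, stdout);   /* simple_read_from_buffer() analogue */
		return 0;
	}
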
432static ssize_t write_file_antenna(struct file *file,
433 const char __user *userbuf,
434 size_t count, loff_t *ppos)
435{
436 struct ath5k_softc *sc = file->private_data;
437 unsigned int i;
438 char buf[20];
439
440 if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
441 return -EFAULT;
442
443 if (strncmp(buf, "diversity", 9) == 0) {
444 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
445 printk(KERN_INFO "ath5k debug: enable diversity\n");
446 } else if (strncmp(buf, "fixed-a", 7) == 0) {
447 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
448 printk(KERN_INFO "ath5k debugfs: fixed antenna A\n");
449 } else if (strncmp(buf, "fixed-b", 7) == 0) {
450 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
451 printk(KERN_INFO "ath5k debug: fixed antenna B\n");
452 } else if (strncmp(buf, "clear", 5) == 0) {
453 for (i = 0; i < ARRAY_SIZE(sc->stats.antenna_rx); i++) {
454 sc->stats.antenna_rx[i] = 0;
455 sc->stats.antenna_tx[i] = 0;
456 }
457 printk(KERN_INFO "ath5k debug: cleared antenna stats\n");
458 }
459 return count;
460}
461
462static const struct file_operations fops_antenna = {
463 .read = read_file_antenna,
464 .write = write_file_antenna,
465 .open = ath5k_debugfs_open,
466 .owner = THIS_MODULE,
467};
468
469
470/* debugfs: frameerrors */
471
472static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
473 size_t count, loff_t *ppos)
474{
475 struct ath5k_softc *sc = file->private_data;
476 struct ath5k_statistics *st = &sc->stats;
477 char buf[700];
478 unsigned int len = 0;
479 int i;
480
481 len += snprintf(buf+len, sizeof(buf)-len,
482 "RX\n---------------------\n");
483 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%d\t(%d%%)\n",
484 st->rxerr_crc,
485 st->rx_all_count > 0 ?
486 st->rxerr_crc*100/st->rx_all_count : 0);
487 len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%d\t(%d%%)\n",
488 st->rxerr_phy,
489 st->rx_all_count > 0 ?
490 st->rxerr_phy*100/st->rx_all_count : 0);
491 for (i = 0; i < 32; i++) {
492 if (st->rxerr_phy_code[i])
493 len += snprintf(buf+len, sizeof(buf)-len,
494 " phy_err[%d]\t%d\n",
495 i, st->rxerr_phy_code[i]);
496 }
497
498 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
499 st->rxerr_fifo,
500 st->rx_all_count > 0 ?
501 st->rxerr_fifo*100/st->rx_all_count : 0);
502 len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%d\t(%d%%)\n",
503 st->rxerr_decrypt,
504 st->rx_all_count > 0 ?
505 st->rxerr_decrypt*100/st->rx_all_count : 0);
506 len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%d\t(%d%%)\n",
507 st->rxerr_mic,
508 st->rx_all_count > 0 ?
509 st->rxerr_mic*100/st->rx_all_count : 0);
510 len += snprintf(buf+len, sizeof(buf)-len, "process\t%d\t(%d%%)\n",
511 st->rxerr_proc,
512 st->rx_all_count > 0 ?
513 st->rxerr_proc*100/st->rx_all_count : 0);
514 len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%d\t(%d%%)\n",
515 st->rxerr_jumbo,
516 st->rx_all_count > 0 ?
517 st->rxerr_jumbo*100/st->rx_all_count : 0);
518 len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n",
519 st->rx_all_count);
520
521 len += snprintf(buf+len, sizeof(buf)-len,
522 "\nTX\n---------------------\n");
523 len += snprintf(buf+len, sizeof(buf)-len, "retry\t%d\t(%d%%)\n",
524 st->txerr_retry,
525 st->tx_all_count > 0 ?
526 st->txerr_retry*100/st->tx_all_count : 0);
527 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n",
528 st->txerr_fifo,
529 st->tx_all_count > 0 ?
530 st->txerr_fifo*100/st->tx_all_count : 0);
531 len += snprintf(buf+len, sizeof(buf)-len, "filter\t%d\t(%d%%)\n",
532 st->txerr_filt,
533 st->tx_all_count > 0 ?
534 st->txerr_filt*100/st->tx_all_count : 0);
535 len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n",
536 st->tx_all_count);
537
538 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
539}
540
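
The percentages above are guarded integer math, not floating point. A hypothetical helper (not in the patch) captures the idiom: truncating division, safe against a zero frame count:

	#include <stdio.h>

	static unsigned int pct(unsigned int part, unsigned int whole)
	{
		/* guard against divide-by-zero; truncates toward zero */
		return whole > 0 ? part * 100 / whole : 0;
	}

	int main(void)
	{
		printf("CRC\t%u\t(%u%%)\n", 25u, pct(25, 1000));   /* prints 2% */
		return 0;
	}
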
541static ssize_t write_file_frameerrors(struct file *file,
542 const char __user *userbuf,
543 size_t count, loff_t *ppos)
544{
545 struct ath5k_softc *sc = file->private_data;
546 struct ath5k_statistics *st = &sc->stats;
547 char buf[20];
548
549 if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
550 return -EFAULT;
551
552 if (strncmp(buf, "clear", 5) == 0) {
553 st->rxerr_crc = 0;
554 st->rxerr_phy = 0;
555 st->rxerr_fifo = 0;
556 st->rxerr_decrypt = 0;
557 st->rxerr_mic = 0;
558 st->rxerr_proc = 0;
559 st->rxerr_jumbo = 0;
560 st->rx_all_count = 0;
561 st->txerr_retry = 0;
562 st->txerr_fifo = 0;
563 st->txerr_filt = 0;
564 st->tx_all_count = 0;
565 printk(KERN_INFO "ath5k debug: cleared frameerrors stats\n");
566 }
567 return count;
568}
569
570static const struct file_operations fops_frameerrors = {
571 .read = read_file_frameerrors,
572 .write = write_file_frameerrors,
573 .open = ath5k_debugfs_open,
574 .owner = THIS_MODULE,
575};
576
577
578/* debugfs: ani */
579
580static ssize_t read_file_ani(struct file *file, char __user *user_buf,
581 size_t count, loff_t *ppos)
582{
583 struct ath5k_softc *sc = file->private_data;
584 struct ath5k_statistics *st = &sc->stats;
585 struct ath5k_ani_state *as = &sc->ani_state;
586
587 char buf[700];
588 unsigned int len = 0;
589
590 len += snprintf(buf+len, sizeof(buf)-len,
591 "HW has PHY error counters:\t%s\n",
592 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
593 "yes" : "no");
594 len += snprintf(buf+len, sizeof(buf)-len,
595 "HW max spur immunity level:\t%d\n",
596 as->max_spur_level);
597 len += snprintf(buf+len, sizeof(buf)-len,
598 "\nANI state\n--------------------------------------------\n");
599 len += snprintf(buf+len, sizeof(buf)-len, "operating mode:\t\t\t");
600 switch (as->ani_mode) {
601 case ATH5K_ANI_MODE_OFF:
602 len += snprintf(buf+len, sizeof(buf)-len, "OFF\n");
603 break;
604 case ATH5K_ANI_MODE_MANUAL_LOW:
605 len += snprintf(buf+len, sizeof(buf)-len,
606 "MANUAL LOW\n");
607 break;
608 case ATH5K_ANI_MODE_MANUAL_HIGH:
609 len += snprintf(buf+len, sizeof(buf)-len,
610 "MANUAL HIGH\n");
611 break;
612 case ATH5K_ANI_MODE_AUTO:
613 len += snprintf(buf+len, sizeof(buf)-len, "AUTO\n");
614 break;
615 default:
616 len += snprintf(buf+len, sizeof(buf)-len,
617 "??? (not good)\n");
618 break;
619 }
620 len += snprintf(buf+len, sizeof(buf)-len,
621 "noise immunity level:\t\t%d\n",
622 as->noise_imm_level);
623 len += snprintf(buf+len, sizeof(buf)-len,
624 "spur immunity level:\t\t%d\n",
625 as->spur_level);
626 len += snprintf(buf+len, sizeof(buf)-len, "firstep level:\t\t\t%d\n",
627 as->firstep_level);
628 len += snprintf(buf+len, sizeof(buf)-len,
629 "OFDM weak signal detection:\t%s\n",
630 as->ofdm_weak_sig ? "on" : "off");
631 len += snprintf(buf+len, sizeof(buf)-len,
632 "CCK weak signal detection:\t%s\n",
633 as->cck_weak_sig ? "on" : "off");
634
635 len += snprintf(buf+len, sizeof(buf)-len,
636 "\nMIB INTERRUPTS:\t\t%u\n",
637 st->mib_intr);
638 len += snprintf(buf+len, sizeof(buf)-len,
639 "beacon RSSI average:\t%d\n",
640 sc->ah->ah_beacon_rssi_avg.avg);
641 len += snprintf(buf+len, sizeof(buf)-len, "profcnt tx\t\t%u\t(%d%%)\n",
642 as->pfc_tx,
643 as->pfc_cycles > 0 ?
644 as->pfc_tx*100/as->pfc_cycles : 0);
645 len += snprintf(buf+len, sizeof(buf)-len, "profcnt rx\t\t%u\t(%d%%)\n",
646 as->pfc_rx,
647 as->pfc_cycles > 0 ?
648 as->pfc_rx*100/as->pfc_cycles : 0);
649 len += snprintf(buf+len, sizeof(buf)-len, "profcnt busy\t\t%u\t(%d%%)\n",
650 as->pfc_busy,
651 as->pfc_cycles > 0 ?
652 as->pfc_busy*100/as->pfc_cycles : 0);
653 len += snprintf(buf+len, sizeof(buf)-len, "profcnt cycles\t\t%u\n",
654 as->pfc_cycles);
655 len += snprintf(buf+len, sizeof(buf)-len,
656 "listen time\t\t%d\tlast: %d\n",
657 as->listen_time, as->last_listen);
658 len += snprintf(buf+len, sizeof(buf)-len,
659 "OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
660 as->ofdm_errors, as->last_ofdm_errors,
661 as->sum_ofdm_errors);
662 len += snprintf(buf+len, sizeof(buf)-len,
663 "CCK errors\t\t%u\tlast: %u\tsum: %u\n",
664 as->cck_errors, as->last_cck_errors,
665 as->sum_cck_errors);
666 len += snprintf(buf+len, sizeof(buf)-len,
667 "AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
668 ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1),
669 ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
670 ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT1)));
671 len += snprintf(buf+len, sizeof(buf)-len,
672 "AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
673 ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2),
674 ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
675 ath5k_hw_reg_read(sc->ah, AR5K_PHYERR_CNT2)));
676
677 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
678}
679
680static ssize_t write_file_ani(struct file *file,
681 const char __user *userbuf,
682 size_t count, loff_t *ppos)
683{
684 struct ath5k_softc *sc = file->private_data;
685 char buf[20];
686
687 if (copy_from_user(buf, userbuf, min(count, sizeof(buf))))
688 return -EFAULT;
689
690 if (strncmp(buf, "sens-low", 8) == 0) {
691 ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_HIGH);
692 } else if (strncmp(buf, "sens-high", 9) == 0) {
693 ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_MANUAL_LOW);
694 } else if (strncmp(buf, "ani-off", 7) == 0) {
695 ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_OFF);
696 } else if (strncmp(buf, "ani-on", 6) == 0) {
697 ath5k_ani_init(sc->ah, ATH5K_ANI_MODE_AUTO);
698 } else if (strncmp(buf, "noise-low", 9) == 0) {
699 ath5k_ani_set_noise_immunity_level(sc->ah, 0);
700 } else if (strncmp(buf, "noise-high", 10) == 0) {
701 ath5k_ani_set_noise_immunity_level(sc->ah,
702 ATH5K_ANI_MAX_NOISE_IMM_LVL);
703 } else if (strncmp(buf, "spur-low", 8) == 0) {
704 ath5k_ani_set_spur_immunity_level(sc->ah, 0);
705 } else if (strncmp(buf, "spur-high", 9) == 0) {
706 ath5k_ani_set_spur_immunity_level(sc->ah,
707 sc->ani_state.max_spur_level);
708 } else if (strncmp(buf, "fir-low", 7) == 0) {
709 ath5k_ani_set_firstep_level(sc->ah, 0);
710 } else if (strncmp(buf, "fir-high", 8) == 0) {
711 ath5k_ani_set_firstep_level(sc->ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
712 } else if (strncmp(buf, "ofdm-off", 8) == 0) {
713 ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, false);
714 } else if (strncmp(buf, "ofdm-on", 7) == 0) {
715 ath5k_ani_set_ofdm_weak_signal_detection(sc->ah, true);
716 } else if (strncmp(buf, "cck-off", 7) == 0) {
717 ath5k_ani_set_cck_weak_signal_detection(sc->ah, false);
718 } else if (strncmp(buf, "cck-on", 6) == 0) {
719 ath5k_ani_set_cck_weak_signal_detection(sc->ah, true);
720 }
721 return count;
722}
723
724static const struct file_operations fops_ani = {
725 .read = read_file_ani,
726 .write = write_file_ani,
727 .open = ath5k_debugfs_open,
728 .owner = THIS_MODULE,
729};
730
731
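
All three write handlers parse short keyword commands: the user buffer is copied in with copy_from_user() and matched with bounded strncmp() calls. A user-space sketch of the same dispatch, with the buffer zeroed first so the bounded compares stay well-defined even when the input fills it:

	#include <stdio.h>
	#include <string.h>

	static void handle_cmd(const char *input, size_t count)
	{
		char buf[20] = { 0 };   /* zero-filled so compares hit a NUL */

		memcpy(buf, input, count < sizeof(buf) ? count : sizeof(buf) - 1);

		if (strncmp(buf, "ani-off", 7) == 0)
			puts("ANI disabled");
		else if (strncmp(buf, "ani-on", 6) == 0)
			puts("ANI set to auto");
		else
			puts("unknown command ignored");
	}

	int main(void)
	{
		handle_cmd("ani-on\n", 7);
		handle_cmd("bogus", 5);
		return 0;
	}
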
367/* init */ 732/* init */
368 733
369void 734void
@@ -393,6 +758,20 @@ ath5k_debug_init_device(struct ath5k_softc *sc)
393 758
394 sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR, 759 sc->debug.debugfs_reset = debugfs_create_file("reset", S_IWUSR,
395 sc->debug.debugfs_phydir, sc, &fops_reset); 760 sc->debug.debugfs_phydir, sc, &fops_reset);
761
762 sc->debug.debugfs_antenna = debugfs_create_file("antenna",
763 S_IWUSR | S_IRUSR,
764 sc->debug.debugfs_phydir, sc, &fops_antenna);
765
766 sc->debug.debugfs_frameerrors = debugfs_create_file("frameerrors",
767 S_IWUSR | S_IRUSR,
768 sc->debug.debugfs_phydir, sc,
769 &fops_frameerrors);
770
771 sc->debug.debugfs_ani = debugfs_create_file("ani",
772 S_IWUSR | S_IRUSR,
773 sc->debug.debugfs_phydir, sc,
774 &fops_ani);
396} 775}
397 776
398void 777void
@@ -408,6 +787,9 @@ ath5k_debug_finish_device(struct ath5k_softc *sc)
408 debugfs_remove(sc->debug.debugfs_registers); 787 debugfs_remove(sc->debug.debugfs_registers);
409 debugfs_remove(sc->debug.debugfs_beacon); 788 debugfs_remove(sc->debug.debugfs_beacon);
410 debugfs_remove(sc->debug.debugfs_reset); 789 debugfs_remove(sc->debug.debugfs_reset);
790 debugfs_remove(sc->debug.debugfs_antenna);
791 debugfs_remove(sc->debug.debugfs_frameerrors);
792 debugfs_remove(sc->debug.debugfs_ani);
411 debugfs_remove(sc->debug.debugfs_phydir); 793 debugfs_remove(sc->debug.debugfs_phydir);
412} 794}
413 795
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 66f69f04e55e..ddd5b3a99e8d 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -74,6 +74,9 @@ struct ath5k_dbg_info {
74 struct dentry *debugfs_registers; 74 struct dentry *debugfs_registers;
75 struct dentry *debugfs_beacon; 75 struct dentry *debugfs_beacon;
76 struct dentry *debugfs_reset; 76 struct dentry *debugfs_reset;
77 struct dentry *debugfs_antenna;
78 struct dentry *debugfs_frameerrors;
79 struct dentry *debugfs_ani;
77}; 80};
78 81
79/** 82/**
@@ -113,6 +116,7 @@ enum ath5k_debug_level {
113 ATH5K_DEBUG_DUMP_TX = 0x00000200, 116 ATH5K_DEBUG_DUMP_TX = 0x00000200,
114 ATH5K_DEBUG_DUMPBANDS = 0x00000400, 117 ATH5K_DEBUG_DUMPBANDS = 0x00000400,
115 ATH5K_DEBUG_TRACE = 0x00001000, 118 ATH5K_DEBUG_TRACE = 0x00001000,
119 ATH5K_DEBUG_ANI = 0x00002000,
116 ATH5K_DEBUG_ANY = 0xffffffff 120 ATH5K_DEBUG_ANY = 0xffffffff
117}; 121};
118 122
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index dc30a2b70a6b..7d7b646ab65a 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -35,7 +35,8 @@
35 */ 35 */
36static int 36static int
37ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc, 37ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
38 unsigned int pkt_len, unsigned int hdr_len, enum ath5k_pkt_type type, 38 unsigned int pkt_len, unsigned int hdr_len, int padsize,
39 enum ath5k_pkt_type type,
39 unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0, 40 unsigned int tx_power, unsigned int tx_rate0, unsigned int tx_tries0,
40 unsigned int key_index, unsigned int antenna_mode, unsigned int flags, 41 unsigned int key_index, unsigned int antenna_mode, unsigned int flags,
41 unsigned int rtscts_rate, unsigned int rtscts_duration) 42 unsigned int rtscts_rate, unsigned int rtscts_duration)
@@ -71,7 +72,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
71 /* Verify and set frame length */ 72 /* Verify and set frame length */
72 73
73 /* remove padding we might have added before */ 74 /* remove padding we might have added before */
74 frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN; 75 frame_len = pkt_len - padsize + FCS_LEN;
75 76
76 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN) 77 if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
77 return -EINVAL; 78 return -EINVAL;
@@ -100,7 +101,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
100 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN); 101 AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN);
101 } 102 }
102 103
103 /*Diferences between 5210-5211*/ 104 /*Differences between 5210-5211*/
104 if (ah->ah_version == AR5K_AR5210) { 105 if (ah->ah_version == AR5K_AR5210) {
105 switch (type) { 106 switch (type) {
106 case AR5K_PKT_TYPE_BEACON: 107 case AR5K_PKT_TYPE_BEACON:
@@ -165,6 +166,7 @@ ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
165 */ 166 */
166static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah, 167static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
167 struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len, 168 struct ath5k_desc *desc, unsigned int pkt_len, unsigned int hdr_len,
169 int padsize,
168 enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0, 170 enum ath5k_pkt_type type, unsigned int tx_power, unsigned int tx_rate0,
169 unsigned int tx_tries0, unsigned int key_index, 171 unsigned int tx_tries0, unsigned int key_index,
170 unsigned int antenna_mode, unsigned int flags, 172 unsigned int antenna_mode, unsigned int flags,
@@ -206,7 +208,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
206 /* Verify and set frame length */ 208 /* Verify and set frame length */
207 209
208 /* remove padding we might have added before */ 210 /* remove padding we might have added before */
209 frame_len = pkt_len - ath5k_pad_size(hdr_len) + FCS_LEN; 211 frame_len = pkt_len - padsize + FCS_LEN;
210 212
211 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN) 213 if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
212 return -EINVAL; 214 return -EINVAL;
@@ -229,7 +231,7 @@ static int ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
229 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT); 231 AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
230 tx_ctl->tx_control_1 |= AR5K_REG_SM(type, 232 tx_ctl->tx_control_1 |= AR5K_REG_SM(type,
231 AR5K_4W_TX_DESC_CTL1_FRAME_TYPE); 233 AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
232 tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0 + AR5K_TUNE_HWTXTRIES, 234 tx_ctl->tx_control_2 = AR5K_REG_SM(tx_tries0,
233 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0); 235 AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
234 tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0; 236 tx_ctl->tx_control_3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
235 237
@@ -643,6 +645,7 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
643 rs->rs_status |= AR5K_RXERR_PHY; 645 rs->rs_status |= AR5K_RXERR_PHY;
644 rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1, 646 rs->rs_phyerr |= AR5K_REG_MS(rx_err->rx_error_1,
645 AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE); 647 AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE);
648 ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
646 } 649 }
647 650
648 if (rx_status->rx_status_1 & 651 if (rx_status->rx_status_1 &
@@ -668,12 +671,6 @@ int ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
668 ah->ah_version != AR5K_AR5212) 671 ah->ah_version != AR5K_AR5212)
669 return -ENOTSUPP; 672 return -ENOTSUPP;
670 673
671 /* XXX: What is this magic value and where is it used ? */
672 if (ah->ah_version == AR5K_AR5212)
673 ah->ah_magic = AR5K_EEPROM_MAGIC_5212;
674 else if (ah->ah_version == AR5K_AR5211)
675 ah->ah_magic = AR5K_EEPROM_MAGIC_5211;
676
677 if (ah->ah_version == AR5K_AR5212) { 674 if (ah->ah_version == AR5K_AR5212) {
678 ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc; 675 ah->ah_setup_rx_desc = ath5k_hw_setup_rx_desc;
679 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc; 676 ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index 56158c804e3e..64538fbe4167 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -112,15 +112,32 @@ struct ath5k_hw_rx_error {
112#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00 112#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE 0x0000ff00
113#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8 113#define AR5K_RX_DESC_ERROR1_PHY_ERROR_CODE_S 8
114 114
115/* PHY Error codes */ 115/**
116#define AR5K_DESC_RX_PHY_ERROR_NONE 0x00 116 * enum ath5k_phy_error_code - PHY Error codes
117#define AR5K_DESC_RX_PHY_ERROR_TIMING 0x20 117 */
118#define AR5K_DESC_RX_PHY_ERROR_PARITY 0x40 118enum ath5k_phy_error_code {
119#define AR5K_DESC_RX_PHY_ERROR_RATE 0x60 119 AR5K_RX_PHY_ERROR_UNDERRUN = 0, /* Transmit underrun */
120#define AR5K_DESC_RX_PHY_ERROR_LENGTH 0x80 120 AR5K_RX_PHY_ERROR_TIMING = 1, /* Timing error */
121#define AR5K_DESC_RX_PHY_ERROR_64QAM 0xa0 121 AR5K_RX_PHY_ERROR_PARITY = 2, /* Illegal parity */
122#define AR5K_DESC_RX_PHY_ERROR_SERVICE 0xc0 122 AR5K_RX_PHY_ERROR_RATE = 3, /* Illegal rate */
123#define AR5K_DESC_RX_PHY_ERROR_TRANSMITOVR 0xe0 123 AR5K_RX_PHY_ERROR_LENGTH = 4, /* Illegal length */
124 AR5K_RX_PHY_ERROR_RADAR = 5, /* Radar detect */
125 AR5K_RX_PHY_ERROR_SERVICE = 6, /* Illegal service */
126 AR5K_RX_PHY_ERROR_TOR = 7, /* Transmit override receive */
127 /* these are specific to the 5212 */
128 AR5K_RX_PHY_ERROR_OFDM_TIMING = 17,
129 AR5K_RX_PHY_ERROR_OFDM_SIGNAL_PARITY = 18,
130 AR5K_RX_PHY_ERROR_OFDM_RATE_ILLEGAL = 19,
131 AR5K_RX_PHY_ERROR_OFDM_LENGTH_ILLEGAL = 20,
132 AR5K_RX_PHY_ERROR_OFDM_POWER_DROP = 21,
133 AR5K_RX_PHY_ERROR_OFDM_SERVICE = 22,
134 AR5K_RX_PHY_ERROR_OFDM_RESTART = 23,
135 AR5K_RX_PHY_ERROR_CCK_TIMING = 25,
136 AR5K_RX_PHY_ERROR_CCK_HEADER_CRC = 26,
137 AR5K_RX_PHY_ERROR_CCK_RATE_ILLEGAL = 27,
138 AR5K_RX_PHY_ERROR_CCK_SERVICE = 30,
139 AR5K_RX_PHY_ERROR_CCK_RESTART = 31,
140};
124 141
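
These codes land in bits 8..15 of the second RX error word and are pulled out with the AR5K_REG_MS() mask-and-shift using the PHY_ERROR_CODE define at the top of this hunk. A compact sketch of the extraction:

	#include <stdio.h>
	#include <stdint.h>

	#define ERROR1_PHY_ERROR_CODE   0x0000ff00   /* as in the defines above */
	#define ERROR1_PHY_ERROR_CODE_S 8

	int main(void)
	{
		uint32_t rx_error_1 = 0x00001100;    /* sample RX error word */
		unsigned int code = (rx_error_1 & ERROR1_PHY_ERROR_CODE)
					>> ERROR1_PHY_ERROR_CODE_S;

		if (code == 17)                  /* AR5K_RX_PHY_ERROR_OFDM_TIMING */
			printf("OFDM timing error (code %u)\n", code);
		return 0;
	}
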
125/* 142/*
126 * 5210/5211 hardware 2-word TX control descriptor 143 * 5210/5211 hardware 2-word TX control descriptor
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 67665cdc7afe..ed0263672d6d 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -331,7 +331,8 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
331 ee->ee_x_gain[mode] = (val >> 1) & 0xf; 331 ee->ee_x_gain[mode] = (val >> 1) & 0xf;
332 ee->ee_xpd[mode] = val & 0x1; 332 ee->ee_xpd[mode] = val & 0x1;
333 333
334 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) 334 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
335 mode != AR5K_EEPROM_MODE_11B)
335 ee->ee_fixed_bias[mode] = (val >> 13) & 0x1; 336 ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
336 337
337 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) { 338 if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
@@ -341,6 +342,7 @@ static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
341 if (mode == AR5K_EEPROM_MODE_11A) 342 if (mode == AR5K_EEPROM_MODE_11A)
342 ee->ee_xr_power[mode] = val & 0x3f; 343 ee->ee_xr_power[mode] = val & 0x3f;
343 else { 344 else {
345 /* b_DB_11[bg] and b_OB_11[bg] */
344 ee->ee_ob[mode][0] = val & 0x7; 346 ee->ee_ob[mode][0] = val & 0x7;
345 ee->ee_db[mode][0] = (val >> 3) & 0x7; 347 ee->ee_db[mode][0] = (val >> 3) & 0x7;
346 } 348 }
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index 473a483bb9c3..c4a6d5f26af4 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -24,9 +24,6 @@
24 * SERDES infos are present */ 24 * SERDES infos are present */
25#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */ 25#define AR5K_EEPROM_MAGIC 0x003d /* EEPROM Magic number */
26#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */ 26#define AR5K_EEPROM_MAGIC_VALUE 0x5aa5 /* Default - found on EEPROM */
27#define AR5K_EEPROM_MAGIC_5212 0x0000145c /* 5212 */
28#define AR5K_EEPROM_MAGIC_5211 0x0000145b /* 5211 */
29#define AR5K_EEPROM_MAGIC_5210 0x0000145a /* 5210 */
30 27
31#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */ 28#define AR5K_EEPROM_IS_HB63 0x000b /* Talon detect */
32 29
@@ -78,9 +75,9 @@
78#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1) 75#define AR5K_EEPROM_HDR_11A(_v) (((_v) >> AR5K_EEPROM_MODE_11A) & 0x1)
79#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1) 76#define AR5K_EEPROM_HDR_11B(_v) (((_v) >> AR5K_EEPROM_MODE_11B) & 0x1)
80#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1) 77#define AR5K_EEPROM_HDR_11G(_v) (((_v) >> AR5K_EEPROM_MODE_11G) & 0x1)
81#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz (?) */ 78#define AR5K_EEPROM_HDR_T_2GHZ_DIS(_v) (((_v) >> 3) & 0x1) /* Disable turbo for 2Ghz */
82#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for a/XR mode (eeprom_init) */ 79#define AR5K_EEPROM_HDR_T_5GHZ_DBM(_v) (((_v) >> 4) & 0x7f) /* Max turbo power for < 2W power consumption */
83#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) 80#define AR5K_EEPROM_HDR_DEVICE(_v) (((_v) >> 11) & 0x7) /* Device type (1 Cardbus, 2 PCI, 3 MiniPCI, 4 AP) */
84#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */ 81#define AR5K_EEPROM_HDR_RFKILL(_v) (((_v) >> 14) & 0x1) /* Device has RFKill support */
85#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */ 82#define AR5K_EEPROM_HDR_T_5GHZ_DIS(_v) (((_v) >> 15) & 0x1) /* Disable turbo for 5Ghz */
86 83
@@ -101,7 +98,7 @@
101 98
102#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5) 99#define AR5K_EEPROM_MISC1 AR5K_EEPROM_INFO(5)
103#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff) 100#define AR5K_EEPROM_TARGET_PWRSTART(_v) ((_v) & 0xfff)
104#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) 101#define AR5K_EEPROM_HAS32KHZCRYSTAL(_v) (((_v) >> 14) & 0x1) /* has 32KHz crystal for sleep mode */
105#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1) 102#define AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(_v) (((_v) >> 15) & 0x1)
106 103
107#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6) 104#define AR5K_EEPROM_MISC2 AR5K_EEPROM_INFO(6)
@@ -114,26 +111,27 @@
114 111
115#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8) 112#define AR5K_EEPROM_MISC4 AR5K_EEPROM_INFO(8)
116#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff) 113#define AR5K_EEPROM_CAL_DATA_START(_v) (((_v) >> 4) & 0xfff)
117#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) 114#define AR5K_EEPROM_MASK_R0(_v) (((_v) >> 2) & 0x3) /* modes supported by radio 0 (bit 1: G, bit 2: A) */
118#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) 115#define AR5K_EEPROM_MASK_R1(_v) ((_v) & 0x3) /* modes supported by radio 1 (bit 1: G, bit 2: A) */
119 116
120#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9) 117#define AR5K_EEPROM_MISC5 AR5K_EEPROM_INFO(9)
121#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) 118#define AR5K_EEPROM_COMP_DIS(_v) ((_v) & 0x1) /* disable compression */
122#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) 119#define AR5K_EEPROM_AES_DIS(_v) (((_v) >> 1) & 0x1) /* disable AES */
123#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) 120#define AR5K_EEPROM_FF_DIS(_v) (((_v) >> 2) & 0x1) /* disable fast frames */
124#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) 121#define AR5K_EEPROM_BURST_DIS(_v) (((_v) >> 3) & 0x1) /* disable bursting */
125#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) 122#define AR5K_EEPROM_MAX_QCU(_v) (((_v) >> 4) & 0xf) /* max number of QCUs. defaults to 10 */
 126#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) 123#define AR5K_EEPROM_HEAVY_CLIP_EN(_v) (((_v) >> 8) & 0x1) /* enable heavy clipping */
127#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) 124#define AR5K_EEPROM_KEY_CACHE_SIZE(_v) (((_v) >> 12) & 0xf) /* key cache size. defaults to 128 */
128 125
129#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10) 126#define AR5K_EEPROM_MISC6 AR5K_EEPROM_INFO(10)
130#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x8) 127#define AR5K_EEPROM_TX_CHAIN_DIS ((_v) & 0x7) /* MIMO chains disabled for TX bitmask */
131#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x8) 128#define AR5K_EEPROM_RX_CHAIN_DIS (((_v) >> 3) & 0x7) /* MIMO chains disabled for RX bitmask */
132#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1) 129#define AR5K_EEPROM_FCC_MID_EN (((_v) >> 6) & 0x1) /* 5.47-5.7GHz supported */
133#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1) 130#define AR5K_EEPROM_JAP_U1EVEN_EN (((_v) >> 7) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on even channels (5180, 5200, 5220, 5240) supported */
134#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1) 131#define AR5K_EEPROM_JAP_U2_EN (((_v) >> 8) & 0x1) /* Japan UNII2 band (5.25-5.35GHz) supported */
135#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 9) & 0x1) 132#define AR5K_EEPROM_JAP_MID_EN (((_v) >> 9) & 0x1) /* Japan band from 5.47-5.7GHz supported */
136#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 10) & 0x1) 133#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 10) & 0x1) /* Japan UNII2 band (5.15-5.25GHz) on odd channels (5170, 5190, 5210, 5230) supported */
 136#define AR5K_EEPROM_JAP_11A_NEW_EN (((_v) >> 10) & 0x1) 133#define AR5K_EEPROM_JAP_U1ODD_EN (((_v) >> 10) & 0x1) /* Japan UNII1 band (5.15-5.25GHz) on odd channels (5170, 5190, 5210, 5230) supported */
137 135
138/* calibration settings */ 136/* calibration settings */
139#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4) 137#define AR5K_EEPROM_MODES_11A(_v) AR5K_EEPROM_OFF(_v, 0x00c5, 0x00d4)
@@ -389,7 +387,49 @@ struct ath5k_edge_power {
389 bool flag; 387 bool flag;
390}; 388};
391 389
392/* EEPROM calibration data */ 390/**
391 * struct ath5k_eeprom_info - EEPROM calibration data
392 *
393 * @ee_regdomain: ath/regd.c takes care of COUNTRY_ERD and WORLDWIDE_ROAMING
394 * flags
395 * @ee_ant_gain: Antenna gain in 0.5dB steps signed [5211 only?]
396 * @ee_cck_ofdm_gain_delta: difference in gainF to output the same power for
397 * OFDM and CCK packets
398 * @ee_cck_ofdm_power_delta: power difference between OFDM (6Mbps) and CCK
399 * (11Mbps) rate in G mode. 0.1dB steps
400 * @ee_scaled_cck_delta: for Japan Channel 14: 0.1dB resolution
401 *
402 * @ee_i_cal: Initial I coefficient to correct I/Q mismatch in the receive path
403 * @ee_q_cal: Initial Q coefficient to correct I/Q mismatch in the receive path
404 * @ee_fixed_bias: use ee_ob and ee_db settings or use automatic control
405 * @ee_switch_settling: RX/TX Switch settling time
406 * @ee_atn_tx_rx: Difference in attenuation between TX and RX in 1dB steps
407 * @ee_ant_control: Antenna Control Settings
408 * @ee_ob: Bias current for Output stage of PA
409 * B/G mode: Index [0] is used for AR2112/5112, otherwise [1]
410 * A mode: [0] 5.15-5.25 [1] 5.25-5.50 [2] 5.50-5.70 [3] 5.70-5.85 GHz
411 * @ee_db: Bias current for Output stage of PA. see @ee_ob
412 * @ee_tx_end2xlna_enable: Time difference from when BB finishes sending a frame
413 * to when the external LNA is activated
414 * @ee_tx_end2xpa_disable: Time difference from when BB finishes sending a frame
415 * to when the external PA switch is deactivated
416 * @ee_tx_frm2xpa_enable: Time difference from when MAC sends frame to when
417 * external PA switch is activated
418 * @ee_thr_62: Clear Channel Assessment (CCA) sensitivity
419 * (IEEE802.11a section 17.3.10.5 )
420 * @ee_xlna_gain: Total gain of the LNA (information only)
421 * @ee_xpd: Use external (1) or internal power detector
422 * @ee_x_gain: Gain for external power detector output (differences in EEMAP
423 * versions!)
424 * @ee_i_gain: Initial gain value after reset
425 * @ee_margin_tx_rx: Margin in dB when final attenuation stage should be used
426 *
427 * @ee_false_detect: Backoff in Sensitivity (dB) on channels with spur signals
428 * @ee_noise_floor_thr: Noise floor threshold in 1dB steps
429 * @ee_adc_desired_size: Desired amplitude for ADC, used by AGC; in 0.5 dB steps
430 * @ee_pga_desired_size: Desired output of PGA (for BB gain) in 0.5 dB steps
431 * @ee_pd_gain_overlap: PD ADC curves need to overlap in 0.5dB steps (ee_map>=2)
432 */
393struct ath5k_eeprom_info { 433struct ath5k_eeprom_info {
394 434
395 /* Header information */ 435 /* Header information */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index aefe84f9c04b..174412fc81f8 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -39,16 +39,16 @@
39 * ath5k_hw_set_opmode - Set PCU operating mode 39 * ath5k_hw_set_opmode - Set PCU operating mode
40 * 40 *
41 * @ah: The &struct ath5k_hw 41 * @ah: The &struct ath5k_hw
42 * @op_mode: &enum nl80211_iftype operating mode
42 * 43 *
43 * Initialize PCU for the various operating modes (AP/STA etc) 44 * Initialize PCU for the various operating modes (AP/STA etc)
44 *
45 * NOTE: ah->ah_op_mode must be set before calling this.
46 */ 45 */
47int ath5k_hw_set_opmode(struct ath5k_hw *ah) 46int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
48{ 47{
49 struct ath_common *common = ath5k_hw_common(ah); 48 struct ath_common *common = ath5k_hw_common(ah);
50 u32 pcu_reg, beacon_reg, low_id, high_id; 49 u32 pcu_reg, beacon_reg, low_id, high_id;
51 50
51 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
52 52
53 /* Preserve rest settings */ 53 /* Preserve rest settings */
54 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; 54 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
@@ -61,7 +61,7 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
61 61
62 ATH5K_TRACE(ah->ah_sc); 62 ATH5K_TRACE(ah->ah_sc);
63 63
64 switch (ah->ah_op_mode) { 64 switch (op_mode) {
65 case NL80211_IFTYPE_ADHOC: 65 case NL80211_IFTYPE_ADHOC:
66 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE; 66 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
67 beacon_reg |= AR5K_BCR_ADHOC; 67 beacon_reg |= AR5K_BCR_ADHOC;
@@ -113,39 +113,26 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
113} 113}
114 114
115/** 115/**
116 * ath5k_hw_update - Update mib counters (mac layer statistics) 116 * ath5k_hw_update - Update MIB counters (mac layer statistics)
117 * 117 *
118 * @ah: The &struct ath5k_hw 118 * @ah: The &struct ath5k_hw
119 * @stats: The &struct ieee80211_low_level_stats we use to track
120 * statistics on the driver
121 * 119 *
122 * Reads MIB counters from PCU and updates sw statistics. Must be 120 * Reads MIB counters from PCU and updates sw statistics. Is called after a
 123 called after a MIB interrupt. 121 MIB interrupt, because one of these counters might have reached its maximum
122 * and triggered the MIB interrupt, to let us read and clear the counter.
123 *
124 * Is called in interrupt context!
124 */ 125 */
125void ath5k_hw_update_mib_counters(struct ath5k_hw *ah, 126void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
126 struct ieee80211_low_level_stats *stats)
127{ 127{
128 ATH5K_TRACE(ah->ah_sc); 128 struct ath5k_statistics *stats = &ah->ah_sc->stats;
129 129
130 /* Read-And-Clear */ 130 /* Read-And-Clear */
131 stats->dot11ACKFailureCount += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL); 131 stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
132 stats->dot11RTSFailureCount += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL); 132 stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
133 stats->dot11RTSSuccessCount += ath5k_hw_reg_read(ah, AR5K_RTS_OK); 133 stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
134 stats->dot11FCSErrorCount += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL); 134 stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
135 135 stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
136 /* XXX: Should we use this to track beacon count ?
137 * -we read it anyway to clear the register */
138 ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
139
140 /* Reset profile count registers on 5212*/
141 if (ah->ah_version == AR5K_AR5212) {
142 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_TX);
143 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RX);
144 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_RXCLR);
145 ath5k_hw_reg_write(ah, 0, AR5K_PROFCNT_CYCLE);
146 }
147
148 /* TODO: Handle ANI stats */
149} 136}
150 137
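
The counters read here are read-and-clear registers: a read returns the count accumulated since the last read and resets the hardware counter, which is why the handler only has to add. A user-space sketch with a simulated register:

	#include <stdio.h>

	static unsigned int hw_ack_fail = 7;   /* simulated AR5K_ACK_FAIL register */

	static unsigned int read_and_clear(unsigned int *reg)
	{
		unsigned int v = *reg;
		*reg = 0;                      /* hardware clears the count on read */
		return v;
	}

	int main(void)
	{
		unsigned int ack_fail = 0;     /* software accumulator, as in stats */

		ack_fail += read_and_clear(&hw_ack_fail);
		ack_fail += read_and_clear(&hw_ack_fail);   /* second read adds 0 */
		printf("ack failures so far: %u\n", ack_fail);
		return 0;
	}
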
151/** 138/**
@@ -167,9 +154,9 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
167 else { 154 else {
168 u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB; 155 u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
169 if (high) 156 if (high)
170 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
171 else
172 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val); 157 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
158 else
159 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
173 } 160 }
174} 161}
175 162
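
This hunk fixes inverted logic: AR5K_STA_ID1_BASE_RATE_11B and AR5K_STA_ID1_ACKCTS_6MB force ACK/CTS out at low rates, so requesting high-rate ACKs means clearing those bits, not setting them. A sketch of the read-modify-write semantics; the register bit values here are illustrative only:

	#include <stdio.h>
	#include <stdint.h>

	#define STA_ID1_BASE_RATE_11B 0x02000000u   /* bit positions illustrative */
	#define STA_ID1_ACKCTS_6MB    0x01000000u

	int main(void)
	{
		uint32_t sta_id1 = 0;
		uint32_t val = STA_ID1_BASE_RATE_11B | STA_ID1_ACKCTS_6MB;
		int high = 1;

		if (high)
			sta_id1 &= ~val;   /* AR5K_REG_DISABLE_BITS: high-rate ACKs */
		else
			sta_id1 |= val;    /* AR5K_REG_ENABLE_BITS: force low rate */

		printf("STA_ID1 = 0x%08x (%s-rate ACKs)\n",
		       (unsigned int)sta_id1, high ? "high" : "low");
		return 0;
	}
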
@@ -179,25 +166,12 @@ void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high)
179\******************/ 166\******************/
180 167
181/** 168/**
182 * ath5k_hw_het_ack_timeout - Get ACK timeout from PCU in usec
183 *
184 * @ah: The &struct ath5k_hw
185 */
186unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
187{
188 ATH5K_TRACE(ah->ah_sc);
189
190 return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
191 AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
192}
193
194/**
195 * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU 169 * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
196 * 170 *
197 * @ah: The &struct ath5k_hw 171 * @ah: The &struct ath5k_hw
198 * @timeout: Timeout in usec 172 * @timeout: Timeout in usec
199 */ 173 */
200int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout) 174static int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
201{ 175{
202 ATH5K_TRACE(ah->ah_sc); 176 ATH5K_TRACE(ah->ah_sc);
203 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK)) 177 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
@@ -211,24 +185,12 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
211} 185}
212 186
213/** 187/**
214 * ath5k_hw_get_cts_timeout - Get CTS timeout from PCU in usec
215 *
216 * @ah: The &struct ath5k_hw
217 */
218unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
219{
220 ATH5K_TRACE(ah->ah_sc);
221 return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
222 AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
223}
224
225/**
226 * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU 188 * ath5k_hw_set_cts_timeout - Set CTS timeout on PCU
227 * 189 *
228 * @ah: The &struct ath5k_hw 190 * @ah: The &struct ath5k_hw
229 * @timeout: Timeout in usec 191 * @timeout: Timeout in usec
230 */ 192 */
231int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout) 193static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
232{ 194{
233 ATH5K_TRACE(ah->ah_sc); 195 ATH5K_TRACE(ah->ah_sc);
234 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS)) 196 if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
@@ -290,7 +252,7 @@ unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
290 * 252 *
291 * @ah: The &struct ath5k_hw 253 * @ah: The &struct ath5k_hw
292 */ 254 */
293unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah) 255static unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
294{ 256{
295 struct ieee80211_channel *channel = ah->ah_current_channel; 257 struct ieee80211_channel *channel = ah->ah_current_channel;
296 258
@@ -308,7 +270,7 @@ unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
308 * 270 *
309 * @ah: The &struct ath5k_hw 271 * @ah: The &struct ath5k_hw
310 */ 272 */
311unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah) 273static unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
312{ 274{
313 struct ieee80211_channel *channel = ah->ah_current_channel; 275 struct ieee80211_channel *channel = ah->ah_current_channel;
314 276
@@ -417,7 +379,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
417 * (ACK etc). 379 * (ACK etc).
418 * 380 *
419 * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma 381 * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
420 * TODO: Init ANI here
421 */ 382 */
422void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah) 383void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
423{ 384{
@@ -451,42 +412,6 @@ void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
451 ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1); 412 ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
452} 413}
453 414
454/*
455 * Set multicast filter by index
456 */
457int ath5k_hw_set_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
458{
459
460 ATH5K_TRACE(ah->ah_sc);
461 if (index >= 64)
462 return -EINVAL;
463 else if (index >= 32)
464 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER1,
465 (1 << (index - 32)));
466 else
467 AR5K_REG_ENABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
468
469 return 0;
470}
471
472/*
473 * Clear Multicast filter by index
474 */
475int ath5k_hw_clear_mcast_filter_idx(struct ath5k_hw *ah, u32 index)
476{
477
478 ATH5K_TRACE(ah->ah_sc);
479 if (index >= 64)
480 return -EINVAL;
481 else if (index >= 32)
482 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER1,
483 (1 << (index - 32)));
484 else
485 AR5K_REG_DISABLE_BITS(ah, AR5K_MCAST_FILTER0, (1 << index));
486
487 return 0;
488}
489
490/** 415/**
491 * ath5k_hw_get_rx_filter - Get current rx filter 416 * ath5k_hw_get_rx_filter - Get current rx filter
492 * 417 *
@@ -572,19 +497,6 @@ void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
572\****************/ 497\****************/
573 498
574/** 499/**
575 * ath5k_hw_get_tsf32 - Get a 32bit TSF
576 *
577 * @ah: The &struct ath5k_hw
578 *
579 * Returns lower 32 bits of current TSF
580 */
581u32 ath5k_hw_get_tsf32(struct ath5k_hw *ah)
582{
583 ATH5K_TRACE(ah->ah_sc);
584 return ath5k_hw_reg_read(ah, AR5K_TSF_L32);
585}
586
587/**
588 * ath5k_hw_get_tsf64 - Get the full 64bit TSF 500 * ath5k_hw_get_tsf64 - Get the full 64bit TSF
589 * 501 *
590 * @ah: The &struct ath5k_hw 502 * @ah: The &struct ath5k_hw
@@ -651,7 +563,7 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
651 /* 563 /*
652 * Set the additional timers by mode 564 * Set the additional timers by mode
653 */ 565 */
654 switch (ah->ah_op_mode) { 566 switch (ah->ah_sc->opmode) {
655 case NL80211_IFTYPE_MONITOR: 567 case NL80211_IFTYPE_MONITOR:
656 case NL80211_IFTYPE_STATION: 568 case NL80211_IFTYPE_STATION:
657 /* In STA mode timer1 is used as next wakeup 569 /* In STA mode timer1 is used as next wakeup
@@ -688,8 +600,8 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
688 * Set the beacon register and enable all timers. 600 * Set the beacon register and enable all timers.
689 */ 601 */
690 /* When in AP or Mesh Point mode zero timer0 to start TSF */ 602 /* When in AP or Mesh Point mode zero timer0 to start TSF */
691 if (ah->ah_op_mode == NL80211_IFTYPE_AP || 603 if (ah->ah_sc->opmode == NL80211_IFTYPE_AP ||
692 ah->ah_op_mode == NL80211_IFTYPE_MESH_POINT) 604 ah->ah_sc->opmode == NL80211_IFTYPE_MESH_POINT)
693 ath5k_hw_reg_write(ah, 0, AR5K_TIMER0); 605 ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
694 606
695 ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0); 607 ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
@@ -722,203 +634,6 @@ void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
722 634
723} 635}
724 636
725#if 0
726/*
727 * Set beacon timers
728 */
729int ath5k_hw_set_beacon_timers(struct ath5k_hw *ah,
730 const struct ath5k_beacon_state *state)
731{
732 u32 cfp_period, next_cfp, dtim, interval, next_beacon;
733
734 /*
735 * TODO: should be changed through *state
736 * review struct ath5k_beacon_state struct
737 *
738 * XXX: These are used for the cfp period below; are they
739 * OK? Is it OK for tsf here to be 0, or should we use
740 * get_tsf?
741 */
742 u32 dtim_count = 0; /* XXX */
743 u32 cfp_count = 0; /* XXX */
744 u32 tsf = 0; /* XXX */
745
746 ATH5K_TRACE(ah->ah_sc);
747 /* Return on an invalid beacon state */
748 if (state->bs_interval < 1)
749 return -EINVAL;
750
751 interval = state->bs_interval;
752 dtim = state->bs_dtim_period;
753
754 /*
755 * PCF support?
756 */
757 if (state->bs_cfp_period > 0) {
758 /*
759 * Enable PCF mode and set the CFP
760 * (Contention Free Period) and timer registers
761 */
762 cfp_period = state->bs_cfp_period * state->bs_dtim_period *
763 state->bs_interval;
764 next_cfp = (cfp_count * state->bs_dtim_period + dtim_count) *
765 state->bs_interval;
766
767 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
768 AR5K_STA_ID1_DEFAULT_ANTENNA |
769 AR5K_STA_ID1_PCF);
770 ath5k_hw_reg_write(ah, cfp_period, AR5K_CFP_PERIOD);
771 ath5k_hw_reg_write(ah, state->bs_cfp_max_duration,
772 AR5K_CFP_DUR);
773 ath5k_hw_reg_write(ah, (tsf + (next_cfp == 0 ? cfp_period :
774 next_cfp)) << 3, AR5K_TIMER2);
775 } else {
776 /* Disable PCF mode */
777 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
778 AR5K_STA_ID1_DEFAULT_ANTENNA |
779 AR5K_STA_ID1_PCF);
780 }
781
782 /*
783 * Enable the beacon timer register
784 */
785 ath5k_hw_reg_write(ah, state->bs_next_beacon, AR5K_TIMER0);
786
787 /*
788 * Start the beacon timers
789 */
790 ath5k_hw_reg_write(ah, (ath5k_hw_reg_read(ah, AR5K_BEACON) &
791 ~(AR5K_BEACON_PERIOD | AR5K_BEACON_TIM)) |
792 AR5K_REG_SM(state->bs_tim_offset ? state->bs_tim_offset + 4 : 0,
793 AR5K_BEACON_TIM) | AR5K_REG_SM(state->bs_interval,
794 AR5K_BEACON_PERIOD), AR5K_BEACON);
795
796 /*
797 * Write new beacon miss threshold, if it appears to be valid
798 * XXX: Figure out right values for min <= bs_bmiss_threshold <= max
799 * and return if it's not in range. We can test this by writing the
800 * largest value and reading back which values the register accepts.
801 */
802
803 AR5K_REG_WRITE_BITS(ah, AR5K_RSSI_THR, AR5K_RSSI_THR_BMISS,
804 state->bs_bmiss_threshold);
805
806 /*
807 * Set sleep control register
808 * XXX: Didn't find this in 5210 code but since this register
809 * exists also in ar5k's 5210 headers I leave it as common code.
810 */
811 AR5K_REG_WRITE_BITS(ah, AR5K_SLEEP_CTL, AR5K_SLEEP_CTL_SLDUR,
812 (state->bs_sleep_duration - 3) << 3);
813
814 /*
815 * Set enhanced sleep registers on 5212
816 */
817 if (ah->ah_version == AR5K_AR5212) {
818 if (state->bs_sleep_duration > state->bs_interval &&
819 roundup(state->bs_sleep_duration, interval) ==
820 state->bs_sleep_duration)
821 interval = state->bs_sleep_duration;
822
823 if (state->bs_sleep_duration > dtim && (dtim == 0 ||
824 roundup(state->bs_sleep_duration, dtim) ==
825 state->bs_sleep_duration))
826 dtim = state->bs_sleep_duration;
827
828 if (interval > dtim)
829 return -EINVAL;
830
831 next_beacon = interval == dtim ? state->bs_next_dtim :
832 state->bs_next_beacon;
833
834 ath5k_hw_reg_write(ah,
835 AR5K_REG_SM((state->bs_next_dtim - 3) << 3,
836 AR5K_SLEEP0_NEXT_DTIM) |
837 AR5K_REG_SM(10, AR5K_SLEEP0_CABTO) |
838 AR5K_SLEEP0_ENH_SLEEP_EN |
839 AR5K_SLEEP0_ASSUME_DTIM, AR5K_SLEEP0);
840
841 ath5k_hw_reg_write(ah, AR5K_REG_SM((next_beacon - 3) << 3,
842 AR5K_SLEEP1_NEXT_TIM) |
843 AR5K_REG_SM(10, AR5K_SLEEP1_BEACON_TO), AR5K_SLEEP1);
844
845 ath5k_hw_reg_write(ah,
846 AR5K_REG_SM(interval, AR5K_SLEEP2_TIM_PER) |
847 AR5K_REG_SM(dtim, AR5K_SLEEP2_DTIM_PER), AR5K_SLEEP2);
848 }
849
850 return 0;
851}
852
853/*
854 * Reset beacon timers
855 */
856void ath5k_hw_reset_beacon(struct ath5k_hw *ah)
857{
858 ATH5K_TRACE(ah->ah_sc);
859 /*
860 * Disable beacon timer
861 */
862 ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
863
864 /*
865 * Disable some beacon register values
866 */
867 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
868 AR5K_STA_ID1_DEFAULT_ANTENNA | AR5K_STA_ID1_PCF);
869 ath5k_hw_reg_write(ah, AR5K_BEACON_PERIOD, AR5K_BEACON);
870}
871
872/*
873 * Wait for beacon queue to finish
874 */
875int ath5k_hw_beaconq_finish(struct ath5k_hw *ah, unsigned long phys_addr)
876{
877 unsigned int i;
878 int ret;
879
880 ATH5K_TRACE(ah->ah_sc);
881
882 /* 5210 doesn't have QCU */
883 if (ah->ah_version == AR5K_AR5210) {
884 /*
885 * Wait for beacon queue to finish by checking
886 * Control Register and Beacon Status Register.
887 */
888 for (i = AR5K_TUNE_BEACON_INTERVAL / 2; i > 0; i--) {
889 if (!(ath5k_hw_reg_read(ah, AR5K_BSR) & AR5K_BSR_TXQ1F)
890 ||
891 !(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_BSR_TXQ1F))
892 break;
893 udelay(10);
894 }
895
896 /* Timeout... */
897 if (i <= 0) {
898 /*
899 * Re-schedule the beacon queue
900 */
901 ath5k_hw_reg_write(ah, phys_addr, AR5K_NOQCU_TXDP1);
902 ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
903 AR5K_BCR);
904
905 return -EIO;
906 }
907 ret = 0;
908 } else {
909 /*5211/5212*/
910 ret = ath5k_hw_register_timeout(ah,
911 AR5K_QUEUE_STATUS(AR5K_TX_QUEUE_ID_BEACON),
912 AR5K_QCU_STS_FRMPENDCNT, 0, false);
913
914 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, AR5K_TX_QUEUE_ID_BEACON))
915 return -EIO;
916 }
917
918 return ret;
919}
920#endif
921
922 637
923/*********************\ 638/*********************\
924* Key table functions * 639* Key table functions *
@@ -971,19 +686,6 @@ int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry)
971 return 0; 686 return 0;
972} 687}
973 688
974/*
975 * Check if a table entry is valid
976 */
977int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry)
978{
979 ATH5K_TRACE(ah->ah_sc);
980 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
981
982 /* Check the validation flag at the end of the entry */
983 return ath5k_hw_reg_read(ah, AR5K_KEYTABLE_MAC1(entry)) &
984 AR5K_KEYTABLE_VALID;
985}
986
987static 689static
988int ath5k_keycache_type(const struct ieee80211_key_conf *key) 690int ath5k_keycache_type(const struct ieee80211_key_conf *key)
989{ 691{
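The set/clear-by-index helpers removed from pcu.c above split a 64-entry multicast hash across two 32-bit filter registers: indices below 32 go to FILTER0, the rest to FILTER1 with bit (index - 32). A standalone sketch of that bit-index math, with plain variables standing in for the AR5K_MCAST_FILTER0/1 register writes so it can be compiled and checked offline:

#include <stdio.h>

static unsigned int filter0, filter1; /* stand-ins for the two HW registers */

static int mcast_set_idx(unsigned int index)
{
    if (index >= 64)
        return -1;                      /* -EINVAL in the driver */
    if (index >= 32)
        filter1 |= 1u << (index - 32);  /* high half of the hash */
    else
        filter0 |= 1u << index;         /* low half of the hash */
    return 0;
}

int main(void)
{
    mcast_set_idx(5);
    mcast_set_idx(40);
    /* prints filter0=00000020 filter1=00000100: bit 5 low, bit 8 high */
    printf("filter0=%08x filter1=%08x\n", filter0, filter1);
    return 0;
}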
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 68e2bccd90d3..3ce9afba1d88 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -20,8 +20,6 @@
20 * 20 *
21 */ 21 */
22 22
23#define _ATH5K_PHY
24
25#include <linux/delay.h> 23#include <linux/delay.h>
26#include <linux/slab.h> 24#include <linux/slab.h>
27 25
@@ -982,7 +980,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
982 return -EINVAL; 980 return -EINVAL;
983 981
984 data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8); 982 data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
985 } else if ((c - (c % 5)) != 2 || c > 5435) { 983 } else if ((c % 5) != 2 || c > 5435) {
986 if (!(c % 20) && c >= 5120) { 984 if (!(c % 20) && c >= 5120) {
987 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); 985 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
988 data2 = ath5k_hw_bitswap(3, 2); 986 data2 = ath5k_hw_bitswap(3, 2);
@@ -995,7 +993,7 @@ static int ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
995 } else 993 } else
996 return -EINVAL; 994 return -EINVAL;
997 } else { 995 } else {
998 data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8); 996 data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
999 data2 = ath5k_hw_bitswap(0, 2); 997 data2 = ath5k_hw_bitswap(0, 2);
1000 } 998 }
1001 999
@@ -1023,7 +1021,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
1023 data0 = ath5k_hw_bitswap((c - 2272), 8); 1021 data0 = ath5k_hw_bitswap((c - 2272), 8);
1024 data2 = 0; 1022 data2 = 0;
1025 /* ? 5GHz ? */ 1023 /* ? 5GHz ? */
1026 } else if ((c - (c % 5)) != 2 || c > 5435) { 1024 } else if ((c % 5) != 2 || c > 5435) {
1027 if (!(c % 20) && c < 5120) 1025 if (!(c % 20) && c < 5120)
1028 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8); 1026 data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
1029 else if (!(c % 10)) 1027 else if (!(c % 10))
@@ -1034,7 +1032,7 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
1034 return -EINVAL; 1032 return -EINVAL;
1035 data2 = ath5k_hw_bitswap(1, 2); 1033 data2 = ath5k_hw_bitswap(1, 2);
1036 } else { 1034 } else {
1037 data0 = ath5k_hw_bitswap((10 * (c - 2) - 4800) / 25 + 1, 8); 1035 data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
1038 data2 = ath5k_hw_bitswap(0, 2); 1036 data2 = ath5k_hw_bitswap(0, 2);
1039 } 1037 }
1040 1038
@@ -1105,28 +1103,6 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1105 PHY calibration 1103 PHY calibration
1106\*****************/ 1104\*****************/
1107 1105
1108void
1109ath5k_hw_calibration_poll(struct ath5k_hw *ah)
1110{
1111 /* Calibration interval in jiffies */
1112 unsigned long cal_intval;
1113
1114 cal_intval = msecs_to_jiffies(ah->ah_cal_intval * 1000);
1115
1116 /* Initialize timestamp if needed */
1117 if (!ah->ah_cal_tstamp)
1118 ah->ah_cal_tstamp = jiffies;
1119
1120 /* For now we always do full calibration
1121 * Mark software interrupt mask and fire software
1122 * interrupt (bit gets auto-cleared) */
1123 if (time_is_before_eq_jiffies(ah->ah_cal_tstamp + cal_intval)) {
1124 ah->ah_cal_tstamp = jiffies;
1125 ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
1126 AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
1127 }
1128}
1129
1130static int sign_extend(int val, const int nbits) 1106static int sign_extend(int val, const int nbits)
1131{ 1107{
1132 int order = BIT(nbits-1); 1108 int order = BIT(nbits-1);
@@ -1191,7 +1167,7 @@ static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
1191 * The median of the values in the history is then loaded into the 1167 * The median of the values in the history is then loaded into the
1192 * hardware for its own use for RSSI and CCA measurements. 1168 * hardware for its own use for RSSI and CCA measurements.
1193 */ 1169 */
1194void ath5k_hw_update_noise_floor(struct ath5k_hw *ah) 1170static void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1195{ 1171{
1196 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 1172 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1197 u32 val; 1173 u32 val;
@@ -1400,7 +1376,11 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1400 } 1376 }
1401 1377
1402 i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7; 1378 i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
1403 q_coffd = q_pwr >> 7; 1379
1380 if (ah->ah_version == AR5K_AR5211)
1381 q_coffd = q_pwr >> 6;
1382 else
1383 q_coffd = q_pwr >> 7;
1404 1384
1405 /* protect against divide by 0 and loss of sign bits */ 1385 /* protect against divide by 0 and loss of sign bits */
1406 if (i_coffd == 0 || q_coffd < 2) 1386 if (i_coffd == 0 || q_coffd < 2)
@@ -1409,7 +1389,10 @@ static int ath5k_hw_rf511x_calibrate(struct ath5k_hw *ah,
1409 i_coff = (-iq_corr) / i_coffd; 1389 i_coff = (-iq_corr) / i_coffd;
1410 i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */ 1390 i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
1411 1391
1412 q_coff = (i_pwr / q_coffd) - 128; 1392 if (ah->ah_version == AR5K_AR5211)
1393 q_coff = (i_pwr / q_coffd) - 64;
1394 else
1395 q_coff = (i_pwr / q_coffd) - 128;
1413 q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */ 1396 q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
1414 1397
1415 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE, 1398 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
@@ -1769,7 +1752,7 @@ u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
1769* Antenna control * 1752* Antenna control *
1770\*****************/ 1753\*****************/
1771 1754
1772void /*TODO:Boundary check*/ 1755static void /*TODO:Boundary check*/
1773ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant) 1756ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
1774{ 1757{
1775 ATH5K_TRACE(ah->ah_sc); 1758 ATH5K_TRACE(ah->ah_sc);
@@ -1778,16 +1761,6 @@ ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
1778 ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA); 1761 ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
1779} 1762}
1780 1763
1781unsigned int ath5k_hw_get_def_antenna(struct ath5k_hw *ah)
1782{
1783 ATH5K_TRACE(ah->ah_sc);
1784
1785 if (ah->ah_version != AR5K_AR5210)
1786 return ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA) & 0x7;
1787
1788 return false; /*XXX: What do we return for 5210 ?*/
1789}
1790
1791/* 1764/*
1792 * Enable/disable fast rx antenna diversity 1765 * Enable/disable fast rx antenna diversity
1793 */ 1766 */
@@ -1931,6 +1904,7 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
1931 1904
1932 ah->ah_tx_ant = tx_ant; 1905 ah->ah_tx_ant = tx_ant;
1933 ah->ah_ant_mode = ant_mode; 1906 ah->ah_ant_mode = ant_mode;
1907 ah->ah_def_ant = def_ant;
1934 1908
1935 sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0; 1909 sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0;
1936 sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0; 1910 sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0;
@@ -2441,19 +2415,6 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
2441 pcdac_tmp = pcdac_high_pwr; 2415 pcdac_tmp = pcdac_high_pwr;
2442 2416
2443 edge_flag = 0x40; 2417 edge_flag = 0x40;
2444#if 0
2445 /* If both min and max power limits are in lower
2446 * power curve's range, only use the low power curve.
2447 * TODO: min/max levels are related to target
2448 * power values requested from driver/user
2449 * XXX: Is this really needed ? */
2450 if (min_pwr < table_max[1] &&
2451 max_pwr < table_max[1]) {
2452 edge_flag = 0;
2453 pcdac_tmp = pcdac_low_pwr;
2454 max_pwr_idx = (table_max[1] - table_min[1])/2;
2455 }
2456#endif
2457 } else { 2418 } else {
2458 pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */ 2419 pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
2459 pcdac_high_pwr = ah->ah_txpower.tmpL[0]; 2420 pcdac_high_pwr = ah->ah_txpower.tmpL[0];
@@ -2600,7 +2561,7 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
2600 max_idx = (pdadc_n < table_size) ? pdadc_n : table_size; 2561 max_idx = (pdadc_n < table_size) ? pdadc_n : table_size;
2601 2562
2602 /* Fill pdadc_out table */ 2563 /* Fill pdadc_out table */
2603 while (pdadc_0 < max_idx) 2564 while (pdadc_0 < max_idx && pdadc_i < 128)
2604 pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++]; 2565 pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
2605 2566
2606 /* Need to extrapolate above this pdgain? */ 2567 /* Need to extrapolate above this pdgain? */
@@ -3144,5 +3105,3 @@ int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
3144 3105
3145 return ath5k_hw_txpower(ah, channel, ee_mode, txpower); 3106 return ath5k_hw_txpower(ah, channel, ee_mode, txpower);
3146} 3107}
3147
3148#undef _ATH5K_PHY
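The AR5211 special cases added to ath5k_hw_rf511x_calibrate above change both the Q divisor (>> 6 instead of >> 7) and the Q offset (64 instead of 128). A pure-C model of that coefficient math with the driver's clamp ranges (signed 6-bit i_coff, signed 5-bit q_coff); the sample powers in main() are invented values, only there to exercise the arithmetic:

#include <stdint.h>
#include <stdio.h>

static int clamp_int(int v, int lo, int hi)
{
    return v < lo ? lo : (v > hi ? hi : v);
}

static void iq_coeffs(uint32_t i_pwr, uint32_t q_pwr, int32_t iq_corr,
                      int is_ar5211, int *i_coff, int *q_coff)
{
    int32_t i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
    int32_t q_coffd = q_pwr >> (is_ar5211 ? 6 : 7);

    /* protect against divide by 0 and loss of sign bits, as above */
    if (i_coffd == 0 || q_coffd < 2)
        return;

    *i_coff = clamp_int(-iq_corr / i_coffd, -32, 31);
    *q_coff = clamp_int((int)(i_pwr / q_coffd) - (is_ar5211 ? 64 : 128),
                        -16, 15);
}

int main(void)
{
    int i = 0, q = 0;

    iq_coeffs(0x10000, 0x0f000, -2048, 0, &i, &q);
    printf("i_coff=%d q_coff=%d\n", i, q);  /* i_coff=4 q_coff=8 */
    return 0;
}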
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 9122a8556f45..f5831da33f7b 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -517,23 +517,6 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
517} 517}
518 518
519/* 519/*
520 * Get slot time from DCU
521 */
522unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
523{
524 unsigned int slot_time_clock;
525
526 ATH5K_TRACE(ah->ah_sc);
527
528 if (ah->ah_version == AR5K_AR5210)
529 slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
530 else
531 slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
532
533 return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
534}
535
536/*
537 * Set slot time on DCU 520 * Set slot time on DCU
538 */ 521 */
539int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time) 522int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
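The removed ath5k_hw_get_slot_time converted the masked register value to microseconds with ath5k_hw_clocktoh. A minimal sketch of that clock/microsecond conversion pair; the 40 MHz core clock is an assumed example rate, the driver derives the real rate from the current channel:

#include <stdio.h>

static unsigned int clocktoh(unsigned int clock, unsigned int mhz)
{
    return clock / mhz;             /* clock ticks -> microseconds */
}

static unsigned int htoclock(unsigned int usec, unsigned int mhz)
{
    return usec * mhz;              /* microseconds -> clock ticks */
}

int main(void)
{
    unsigned int reg = 0x168;       /* example AR5K_DCU_GBL_IFS_SLOT value */

    printf("slot time: %u us\n", clocktoh(reg & 0xffff, 40)); /* 9 us */
    printf("9 us in ticks: %u\n", htoclock(9, 40));           /* 360 */
    return 0;
}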
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 1464f89b249c..55b4ac6d236f 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -212,10 +212,10 @@
212 * MIB control register 212 * MIB control register
213 */ 213 */
214#define AR5K_MIBC 0x0040 /* Register Address */ 214#define AR5K_MIBC 0x0040 /* Register Address */
215#define AR5K_MIBC_COW 0x00000001 /* Warn test indicator */ 215#define AR5K_MIBC_COW 0x00000001 /* Counter Overflow Warning */
216#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */ 216#define AR5K_MIBC_FMC 0x00000002 /* Freeze MIB Counters */
217#define AR5K_MIBC_CMC 0x00000004 /* Clean MIB Counters */ 217#define AR5K_MIBC_CMC 0x00000004 /* Clear MIB Counters */
218#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe */ 218#define AR5K_MIBC_MCS 0x00000008 /* MIB counter strobe, increment all */
219 219
220/* 220/*
221 * Timeout prescale register 221 * Timeout prescale register
@@ -1139,8 +1139,8 @@
1139#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */ 1139#define AR5K_STA_ID1_DEFAULT_ANTENNA 0x00200000 /* Use default antenna */
1140#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */ 1140#define AR5K_STA_ID1_DESC_ANTENNA 0x00400000 /* Update antenna from descriptor */
1141#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */ 1141#define AR5K_STA_ID1_RTS_DEF_ANTENNA 0x00800000 /* Use default antenna for RTS */
1142#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Use 6Mbit/s for ACK/CTS */ 1142#define AR5K_STA_ID1_ACKCTS_6MB 0x01000000 /* Rate to use for ACK/CTS. 0: highest mandatory rate <= RX rate; 1: 1Mbps in B mode */
1143#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* Use 11b base rate for ACK/CTS [5211+] */ 1143#define AR5K_STA_ID1_BASE_RATE_11B 0x02000000 /* 802.11b base rate. 0: 1, 2, 5.5 and 11Mbps; 1: 1 and 2Mbps. [5211+] */
1144#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */ 1144#define AR5K_STA_ID1_SELFGEN_DEF_ANT 0x04000000 /* Use def. antenna for self generated frames */
1145#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */ 1145#define AR5K_STA_ID1_CRYPT_MIC_EN 0x08000000 /* Enable MIC */
1146#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */ 1146#define AR5K_STA_ID1_KEYSRCH_MODE 0x10000000 /* Look up key when key id != 0 */
@@ -1516,7 +1516,14 @@
1516 AR5K_NAV_5210 : AR5K_NAV_5211) 1516 AR5K_NAV_5210 : AR5K_NAV_5211)
1517 1517
1518/* 1518/*
1519 * RTS success register 1519 * MIB counters:
1520 *
1521 * max value is 0xc000; if this is reached we get a MIB interrupt.
1522 * They can be controlled via AR5K_MIBC and are cleared on read.
1523 */
1524
1525/*
1526 * RTS success (MIB counter)
1520 */ 1527 */
1521#define AR5K_RTS_OK_5210 0x8090 1528#define AR5K_RTS_OK_5210 0x8090
1522#define AR5K_RTS_OK_5211 0x8088 1529#define AR5K_RTS_OK_5211 0x8088
@@ -1524,7 +1531,7 @@
1524 AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211) 1531 AR5K_RTS_OK_5210 : AR5K_RTS_OK_5211)
1525 1532
1526/* 1533/*
1527 * RTS failure register 1534 * RTS failure (MIB counter)
1528 */ 1535 */
1529#define AR5K_RTS_FAIL_5210 0x8094 1536#define AR5K_RTS_FAIL_5210 0x8094
1530#define AR5K_RTS_FAIL_5211 0x808c 1537#define AR5K_RTS_FAIL_5211 0x808c
@@ -1532,7 +1539,7 @@
1532 AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211) 1539 AR5K_RTS_FAIL_5210 : AR5K_RTS_FAIL_5211)
1533 1540
1534/* 1541/*
1535 * ACK failure register 1542 * ACK failure (MIB counter)
1536 */ 1543 */
1537#define AR5K_ACK_FAIL_5210 0x8098 1544#define AR5K_ACK_FAIL_5210 0x8098
1538#define AR5K_ACK_FAIL_5211 0x8090 1545#define AR5K_ACK_FAIL_5211 0x8090
@@ -1540,7 +1547,7 @@
1540 AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211) 1547 AR5K_ACK_FAIL_5210 : AR5K_ACK_FAIL_5211)
1541 1548
1542/* 1549/*
1543 * FCS failure register 1550 * FCS failure (MIB counter)
1544 */ 1551 */
1545#define AR5K_FCS_FAIL_5210 0x809c 1552#define AR5K_FCS_FAIL_5210 0x809c
1546#define AR5K_FCS_FAIL_5211 0x8094 1553#define AR5K_FCS_FAIL_5211 0x8094
@@ -1667,11 +1674,17 @@
1667 1674
1668/* 1675/*
1669 * Profile count registers 1676 * Profile count registers
1677 *
1678 * These registers can be cleared and frozen with AR5K_MIBC, but they do not
1679 * generate a MIB interrupt.
1680 * Instead of overflowing, they shift by one bit to the right. All registers
1681 * shift together, i.e. when one reaches the max, all shift at the same time by
1682 * one bit to the right. This way we should always get consistent values.
1670 */ 1683 */
1671#define AR5K_PROFCNT_TX 0x80ec /* Tx count */ 1684#define AR5K_PROFCNT_TX 0x80ec /* Tx count */
1672#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */ 1685#define AR5K_PROFCNT_RX 0x80f0 /* Rx count */
1673#define AR5K_PROFCNT_RXCLR 0x80f4 /* Clear Rx count */ 1686#define AR5K_PROFCNT_RXCLR 0x80f4 /* Busy count */
1674#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle count (?) */ 1687#define AR5K_PROFCNT_CYCLE 0x80f8 /* Cycle counter */
1675 1688
1676/* 1689/*
1677 * Quiet period control registers 1690 * Quiet period control registers
@@ -1758,7 +1771,7 @@
1758#define AR5K_CCK_FIL_CNT 0x8128 1771#define AR5K_CCK_FIL_CNT 0x8128
1759 1772
1760/* 1773/*
1761 * PHY Error Counters (?) 1774 * PHY Error Counters (same masks as AR5K_PHY_ERR_FIL)
1762 */ 1775 */
1763#define AR5K_PHYERR_CNT1 0x812c 1776#define AR5K_PHYERR_CNT1 0x812c
1764#define AR5K_PHYERR_CNT1_MASK 0x8130 1777#define AR5K_PHYERR_CNT1_MASK 0x8130
@@ -1766,6 +1779,9 @@
1766#define AR5K_PHYERR_CNT2 0x8134 1779#define AR5K_PHYERR_CNT2 0x8134
1767#define AR5K_PHYERR_CNT2_MASK 0x8138 1780#define AR5K_PHYERR_CNT2_MASK 0x8138
1768 1781
1782/* If the PHY Error Counters reach this maximum, we get MIB interrupts */
1783#define ATH5K_PHYERR_CNT_MAX 0x00c00000
1784
1769/* 1785/*
1770 * TSF Threshold register (?) 1786 * TSF Threshold register (?)
1771 */ 1787 */
@@ -1974,7 +1990,7 @@
1974#define AR5K_PHY_SETTLING 0x9844 /* Register Address */ 1990#define AR5K_PHY_SETTLING 0x9844 /* Register Address */
1975#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */ 1991#define AR5K_PHY_SETTLING_AGC 0x0000007f /* AGC settling time */
1976#define AR5K_PHY_SETTLING_AGC_S 0 1992#define AR5K_PHY_SETTLING_AGC_S 0
1977#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settlig time */ 1993#define AR5K_PHY_SETTLING_SWITCH 0x00003f80 /* Switch settling time */
1978#define AR5K_PHY_SETTLING_SWITCH_S 7 1994#define AR5K_PHY_SETTLING_SWITCH_S 7
1979 1995
1980/* 1996/*
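As a rough illustration of the profile-counter semantics documented above, a channel-busy estimate can be derived from sampled RXCLR (busy) and CYCLE (total) values; because all four registers shift right together, the ratio stays meaningful across the shift. This is hypothetical user-side math, not driver code, and the sample numbers are invented:

#include <stdio.h>

int main(void)
{
    unsigned int rxclr = 1200000;   /* sampled AR5K_PROFCNT_RXCLR */
    unsigned int cycle = 4000000;   /* sampled AR5K_PROFCNT_CYCLE */

    if (cycle)                      /* avoid divide by zero */
        printf("channel busy: %u%%\n", rxclr * 100u / cycle);  /* 30% */
    return 0;
}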
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index cbf28e379843..44bbbf2a6edd 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -19,8 +19,6 @@
19 * 19 *
20 */ 20 */
21 21
22#define _ATH5K_RESET
23
24/*****************************\ 22/*****************************\
25 Reset functions and helpers 23 Reset functions and helpers
26\*****************************/ 24\*****************************/
@@ -34,6 +32,27 @@
34#include "base.h" 32#include "base.h"
35#include "debug.h" 33#include "debug.h"
36 34
35/*
36 * Check if a register write has been completed
37 */
38int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
39 bool is_set)
40{
41 int i;
42 u32 data;
43
44 for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
45 data = ath5k_hw_reg_read(ah, reg);
46 if (is_set && (data & flag))
47 break;
48 else if ((data & flag) == val)
49 break;
50 udelay(15);
51 }
52
53 return (i <= 0) ? -EAGAIN : 0;
54}
55
37/** 56/**
38 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212 57 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
39 * 58 *
@@ -221,8 +240,8 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
221/* 240/*
222 * Sleep control 241 * Sleep control
223 */ 242 */
224int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, 243static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
225 bool set_chip, u16 sleep_duration) 244 bool set_chip, u16 sleep_duration)
226{ 245{
227 unsigned int i; 246 unsigned int i;
228 u32 staid, data; 247 u32 staid, data;
@@ -1017,11 +1036,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1017 if (ret) 1036 if (ret)
1018 return ret; 1037 return ret;
1019 1038
1020 /*
1021 * Initialize operating mode
1022 */
1023 ah->ah_op_mode = op_mode;
1024
1025 /* PHY access enable */ 1039 /* PHY access enable */
1026 if (ah->ah_mac_srev >= AR5K_SREV_AR5211) 1040 if (ah->ah_mac_srev >= AR5K_SREV_AR5211)
1027 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0)); 1041 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
@@ -1192,7 +1206,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1192 ath5k_hw_set_associd(ah); 1206 ath5k_hw_set_associd(ah);
1193 1207
1194 /* Set PCU config */ 1208 /* Set PCU config */
1195 ath5k_hw_set_opmode(ah); 1209 ath5k_hw_set_opmode(ah, op_mode);
1196 1210
1197 /* Clear any pending interrupts 1211 /* Clear any pending interrupts
1198 * PISR/SISR Not available on 5210 */ 1212 * PISR/SISR Not available on 5210 */
@@ -1378,7 +1392,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1378 * external 32KHz crystal when sleeping if one 1392 * external 32KHz crystal when sleeping if one
1379 * exists */ 1393 * exists */
1380 if (ah->ah_version == AR5K_AR5212 && 1394 if (ah->ah_version == AR5K_AR5212 &&
1381 ah->ah_op_mode != NL80211_IFTYPE_AP) 1395 op_mode != NL80211_IFTYPE_AP)
1382 ath5k_hw_set_sleep_clock(ah, true); 1396 ath5k_hw_set_sleep_clock(ah, true);
1383 1397
1384 /* 1398 /*
@@ -1388,5 +1402,3 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1388 ath5k_hw_reset_tsf(ah); 1402 ath5k_hw_reset_tsf(ah);
1389 return 0; 1403 return 0;
1390} 1404}
1391
1392#undef _ATH5K_RESET
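The polling loop in ath5k_hw_register_timeout, now carried in reset.c above, breaks as soon as the flag becomes set (is_set) or the masked value matches val. A compilable stand-alone model with a simulated register in place of MMIO reads; the driver version udelay(15)s between reads, and the iteration budget here is an assumed stand-in for AR5K_TUNE_REGISTER_TIMEOUT:

#include <stdbool.h>
#include <stdio.h>

#define TUNE_REGISTER_TIMEOUT 20000     /* assumed iteration budget */

static unsigned int fake_reg;           /* stands in for a HW register */

static int register_timeout(unsigned int flag, unsigned int val, bool is_set)
{
    int i;

    for (i = TUNE_REGISTER_TIMEOUT; i > 0; i--) {
        unsigned int data = fake_reg;

        if (is_set && (data & flag))
            break;                      /* flag became set */
        else if ((data & flag) == val)
            break;                      /* flag matches wanted value */
        /* driver: udelay(15) between polls */
    }
    return (i <= 0) ? -1 : 0;           /* -EAGAIN in the driver */
}

int main(void)
{
    fake_reg = 0x4;
    printf("wait-set: %d\n", register_timeout(0x4, 0, true));    /* 0 */
    printf("wait-clear: %d\n", register_timeout(0x8, 0, false)); /* 0 */
    return 0;
}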
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 5774cea23a3b..35f23bdc442f 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -32,3 +32,24 @@ config ATH9K_DEBUGFS
32 32
33 Also required for changing debug message flags at run time. 33 Also required for changing debug message flags at run time.
34 34
35config ATH9K_HTC
36 tristate "Atheros HTC based wireless cards support"
37 depends on USB && MAC80211
38 select ATH9K_HW
39 select MAC80211_LEDS
40 select LEDS_CLASS
41 select NEW_LEDS
42 select ATH9K_COMMON
43 ---help---
44 Support for Atheros HTC based cards.
45 Chipsets supported: AR9271
46
47 For more information: http://wireless.kernel.org/en/users/Drivers/ath9k_htc
48
49 The built module will be ath9k_htc.
50
51config ATH9K_HTC_DEBUGFS
52 bool "Atheros ath9k_htc debugging"
53 depends on ATH9K_HTC && DEBUG_FS
54 ---help---
55 Say Y, if you need access to ath9k_htc's statistics.
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 6b50d5eb9ec3..97133beda269 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -28,3 +28,13 @@ obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
28 28
29obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o 29obj-$(CONFIG_ATH9K_COMMON) += ath9k_common.o
30ath9k_common-y:= common.o 30ath9k_common-y:= common.o
31
32ath9k_htc-y += htc_hst.o \
33 hif_usb.o \
34 wmi.o \
35 htc_drv_txrx.o \
36 htc_drv_main.o \
37 htc_drv_beacon.o \
38 htc_drv_init.o
39
40obj-$(CONFIG_ATH9K_HTC) += ath9k_htc.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index ca4994f13151..85fdd26039c8 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -47,6 +47,7 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
47} 47}
48 48
49static struct ath_bus_ops ath_ahb_bus_ops = { 49static struct ath_bus_ops ath_ahb_bus_ops = {
50 .ath_bus_type = ATH_AHB,
50 .read_cachesize = ath_ahb_read_cachesize, 51 .read_cachesize = ath_ahb_read_cachesize,
51 .eeprom_read = ath_ahb_eeprom_read, 52 .eeprom_read = ath_ahb_eeprom_read,
52}; 53};
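The one-line ahb.c change above tags the bus-ops table with its bus type. A generic sketch of that designated-initializer ops-table pattern; ATH_AHB and struct ath_bus_ops are the driver's, the names below are stand-ins:

#include <stdio.h>

enum bus_type { BUS_AHB, BUS_PCI, BUS_USB };

struct bus_ops {
    enum bus_type type;                 /* lets common code ask "which bus?" */
    void (*read_cachesize)(int *csz);
};

static void ahb_read_cachesize(int *csz)
{
    *csz = 32;                          /* example cache line size */
}

static const struct bus_ops ahb_ops = {
    .type = BUS_AHB,
    .read_cachesize = ahb_read_cachesize,
};

int main(void)
{
    int csz;

    ahb_ops.read_cachesize(&csz);
    printf("bus=%d cachesize=%d\n", ahb_ops.type, csz);
    return 0;
}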
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 83c7ea4c007f..bdcd257ca7a4 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -178,9 +178,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
178#define BAW_WITHIN(_start, _bawsz, _seqno) \ 178#define BAW_WITHIN(_start, _bawsz, _seqno) \
179 ((((_seqno) - (_start)) & 4095) < (_bawsz)) 179 ((((_seqno) - (_start)) & 4095) < (_bawsz))
180 180
181#define ATH_DS_BA_SEQ(_ds) ((_ds)->ds_us.tx.ts_seqnum)
182#define ATH_DS_BA_BITMAP(_ds) (&(_ds)->ds_us.tx.ba_low)
183#define ATH_DS_TX_BA(_ds) ((_ds)->ds_us.tx.ts_flags & ATH9K_TX_BA)
184#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)]) 181#define ATH_AN_2_TID(_an, _tidno) (&(_an)->tid[(_tidno)])
185 182
186#define ATH_TX_COMPLETE_POLL_INT 1000 183#define ATH_TX_COMPLETE_POLL_INT 1000
@@ -483,7 +480,6 @@ struct ath_softc {
483 bool ps_enabled; 480 bool ps_enabled;
484 bool ps_idle; 481 bool ps_idle;
485 unsigned long ps_usecount; 482 unsigned long ps_usecount;
486 enum ath9k_int imask;
487 483
488 struct ath_config config; 484 struct ath_config config;
489 struct ath_rx rx; 485 struct ath_rx rx;
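The BAW_WITHIN macro kept in the ath9k.h hunk above does mod-4096 window arithmetic on 802.11 sequence numbers, so the subtraction is masked before the comparison. A quick self-contained check of its wrap-around behaviour (example window values only):

#include <stdio.h>

#define BAW_WITHIN(_start, _bawsz, _seqno) \
    ((((_seqno) - (_start)) & 4095) < (_bawsz))

int main(void)
{
    /* a window of 64 starting at 4090 wraps past 4095 */
    printf("%d\n", BAW_WITHIN(4090, 64, 4094)); /* 1: inside */
    printf("%d\n", BAW_WITHIN(4090, 64, 10));   /* 1: inside after wrap */
    printf("%d\n", BAW_WITHIN(4090, 64, 200));  /* 0: outside */
    return 0;
}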
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index b4a31a43a62c..22375a754718 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -524,6 +524,7 @@ static void ath9k_beacon_init(struct ath_softc *sc,
524static void ath_beacon_config_ap(struct ath_softc *sc, 524static void ath_beacon_config_ap(struct ath_softc *sc,
525 struct ath_beacon_config *conf) 525 struct ath_beacon_config *conf)
526{ 526{
527 struct ath_hw *ah = sc->sc_ah;
527 u32 nexttbtt, intval; 528 u32 nexttbtt, intval;
528 529
529 /* NB: the beacon interval is kept internally in TU's */ 530 /* NB: the beacon interval is kept internally in TU's */
@@ -539,15 +540,15 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
539 * prepare beacon frames. 540 * prepare beacon frames.
540 */ 541 */
541 intval |= ATH9K_BEACON_ENA; 542 intval |= ATH9K_BEACON_ENA;
542 sc->imask |= ATH9K_INT_SWBA; 543 ah->imask |= ATH9K_INT_SWBA;
543 ath_beaconq_config(sc); 544 ath_beaconq_config(sc);
544 545
545 /* Set the computed AP beacon timers */ 546 /* Set the computed AP beacon timers */
546 547
547 ath9k_hw_set_interrupts(sc->sc_ah, 0); 548 ath9k_hw_set_interrupts(ah, 0);
548 ath9k_beacon_init(sc, nexttbtt, intval); 549 ath9k_beacon_init(sc, nexttbtt, intval);
549 sc->beacon.bmisscnt = 0; 550 sc->beacon.bmisscnt = 0;
550 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 551 ath9k_hw_set_interrupts(ah, ah->imask);
551 552
552 /* Clear the reset TSF flag, so that subsequent beacon updates 553 /* Clear the reset TSF flag, so that subsequent beacon updates
553 will not reset the HW TSF. */ 554 will not reset the HW TSF. */
@@ -566,7 +567,8 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
566static void ath_beacon_config_sta(struct ath_softc *sc, 567static void ath_beacon_config_sta(struct ath_softc *sc,
567 struct ath_beacon_config *conf) 568 struct ath_beacon_config *conf)
568{ 569{
569 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 570 struct ath_hw *ah = sc->sc_ah;
571 struct ath_common *common = ath9k_hw_common(ah);
570 struct ath9k_beacon_state bs; 572 struct ath9k_beacon_state bs;
571 int dtimperiod, dtimcount, sleepduration; 573 int dtimperiod, dtimcount, sleepduration;
572 int cfpperiod, cfpcount; 574 int cfpperiod, cfpcount;
@@ -605,7 +607,7 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
605 * Pull nexttbtt forward to reflect the current 607 * Pull nexttbtt forward to reflect the current
606 * TSF and calculate dtim+cfp state for the result. 608 * TSF and calculate dtim+cfp state for the result.
607 */ 609 */
608 tsf = ath9k_hw_gettsf64(sc->sc_ah); 610 tsf = ath9k_hw_gettsf64(ah);
609 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE; 611 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
610 612
611 num_beacons = tsftu / intval + 1; 613 num_beacons = tsftu / intval + 1;
@@ -678,17 +680,18 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
678 680
679 /* Set the computed STA beacon timers */ 681 /* Set the computed STA beacon timers */
680 682
681 ath9k_hw_set_interrupts(sc->sc_ah, 0); 683 ath9k_hw_set_interrupts(ah, 0);
682 ath9k_hw_set_sta_beacon_timers(sc->sc_ah, &bs); 684 ath9k_hw_set_sta_beacon_timers(ah, &bs);
683 sc->imask |= ATH9K_INT_BMISS; 685 ah->imask |= ATH9K_INT_BMISS;
684 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 686 ath9k_hw_set_interrupts(ah, ah->imask);
685} 687}
686 688
687static void ath_beacon_config_adhoc(struct ath_softc *sc, 689static void ath_beacon_config_adhoc(struct ath_softc *sc,
688 struct ath_beacon_config *conf, 690 struct ath_beacon_config *conf,
689 struct ieee80211_vif *vif) 691 struct ieee80211_vif *vif)
690{ 692{
691 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 693 struct ath_hw *ah = sc->sc_ah;
694 struct ath_common *common = ath9k_hw_common(ah);
692 u64 tsf; 695 u64 tsf;
693 u32 tsftu, intval, nexttbtt; 696 u32 tsftu, intval, nexttbtt;
694 697
@@ -703,7 +706,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
703 else if (intval) 706 else if (intval)
704 nexttbtt = roundup(nexttbtt, intval); 707 nexttbtt = roundup(nexttbtt, intval);
705 708
706 tsf = ath9k_hw_gettsf64(sc->sc_ah); 709 tsf = ath9k_hw_gettsf64(ah);
707 tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE; 710 tsftu = TSF_TO_TU((u32)(tsf>>32), (u32)tsf) + FUDGE;
708 do { 711 do {
709 nexttbtt += intval; 712 nexttbtt += intval;
@@ -719,20 +722,20 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
719 * self-linked tx descriptor and let the hardware deal with things. 722 * self-linked tx descriptor and let the hardware deal with things.
720 */ 723 */
721 intval |= ATH9K_BEACON_ENA; 724 intval |= ATH9K_BEACON_ENA;
722 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)) 725 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_VEOL))
723 sc->imask |= ATH9K_INT_SWBA; 726 ah->imask |= ATH9K_INT_SWBA;
724 727
725 ath_beaconq_config(sc); 728 ath_beaconq_config(sc);
726 729
727 /* Set the computed ADHOC beacon timers */ 730 /* Set the computed ADHOC beacon timers */
728 731
729 ath9k_hw_set_interrupts(sc->sc_ah, 0); 732 ath9k_hw_set_interrupts(ah, 0);
730 ath9k_beacon_init(sc, nexttbtt, intval); 733 ath9k_beacon_init(sc, nexttbtt, intval);
731 sc->beacon.bmisscnt = 0; 734 sc->beacon.bmisscnt = 0;
732 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 735 ath9k_hw_set_interrupts(ah, ah->imask);
733 736
734 /* FIXME: Handle properly when vif is NULL */ 737 /* FIXME: Handle properly when vif is NULL */
735 if (vif && sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_VEOL) 738 if (vif && ah->caps.hw_caps & ATH9K_HW_CAP_VEOL)
736 ath_beacon_start_adhoc(sc, vif); 739 ath_beacon_start_adhoc(sc, vif);
737} 740}
738 741
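The adhoc path above advances nexttbtt in whole beacon intervals until it passes the current TSF expressed in TUs. A simplified, compilable model of that walk; TSF_TO_TU and FUDGE mirror the driver's definitions (1 TU = 1024 us, FUDGE = 2), but the loop starts from zero instead of the last beacon timestamp, and the inputs are examples:

#include <stdint.h>
#include <stdio.h>

#define TSF_TO_TU(_h, _l) \
    ((((uint32_t)(_h)) << 22) | (((uint32_t)(_l)) >> 10))
#define FUDGE 2

int main(void)
{
    uint64_t tsf = 123456789ULL;    /* example TSF in microseconds */
    uint32_t intval = 100;          /* beacon interval in TU */
    uint32_t tsftu = TSF_TO_TU((uint32_t)(tsf >> 32), (uint32_t)tsf) + FUDGE;
    uint32_t nexttbtt = 0;

    do {
        nexttbtt += intval;         /* walk TBTT past the current TSF */
    } while (nexttbtt < tsftu);

    /* tsftu=120565 nexttbtt=120600 */
    printf("tsftu=%u nexttbtt=%u\n", (unsigned)tsftu, (unsigned)nexttbtt);
    return 0;
}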
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 238a5744d8e9..064f5b51dfcd 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -18,6 +18,7 @@
18 18
19/* We can tune this as we go by monitoring really low values */ 19/* We can tune this as we go by monitoring really low values */
20#define ATH9K_NF_TOO_LOW -60 20#define ATH9K_NF_TOO_LOW -60
21#define AR9285_CLCAL_REDO_THRESH 1
21 22
22/* AR5416 may return very high value (like -31 dBm), in those cases the nf 23/* AR5416 may return very high value (like -31 dBm), in those cases the nf
23 * is incorrect and we should use the static NF value. Later we can try to 24 * is incorrect and we should use the static NF value. Later we can try to
@@ -101,9 +102,13 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
101 nf = 0 - ((nf ^ 0x1ff) + 1); 102 nf = 0 - ((nf ^ 0x1ff) + 1);
102 ath_print(common, ATH_DBG_CALIBRATE, 103 ath_print(common, ATH_DBG_CALIBRATE,
103 "NF calibrated [ctl] [chain 0] is %d\n", nf); 104 "NF calibrated [ctl] [chain 0] is %d\n", nf);
105
106 if (AR_SREV_9271(ah) && (nf >= -114))
107 nf = -116;
108
104 nfarray[0] = nf; 109 nfarray[0] = nf;
105 110
106 if (!AR_SREV_9285(ah)) { 111 if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
107 if (AR_SREV_9280_10_OR_LATER(ah)) 112 if (AR_SREV_9280_10_OR_LATER(ah))
108 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), 113 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA),
109 AR9280_PHY_CH1_MINCCA_PWR); 114 AR9280_PHY_CH1_MINCCA_PWR);
@@ -139,9 +144,13 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
139 nf = 0 - ((nf ^ 0x1ff) + 1); 144 nf = 0 - ((nf ^ 0x1ff) + 1);
140 ath_print(common, ATH_DBG_CALIBRATE, 145 ath_print(common, ATH_DBG_CALIBRATE,
141 "NF calibrated [ext] [chain 0] is %d\n", nf); 146 "NF calibrated [ext] [chain 0] is %d\n", nf);
147
148 if (AR_SREV_9271(ah) && (nf >= -114))
149 nf = -116;
150
142 nfarray[3] = nf; 151 nfarray[3] = nf;
143 152
144 if (!AR_SREV_9285(ah)) { 153 if (!AR_SREV_9285(ah) && !AR_SREV_9271(ah)) {
145 if (AR_SREV_9280_10_OR_LATER(ah)) 154 if (AR_SREV_9280_10_OR_LATER(ah))
146 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), 155 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA),
147 AR9280_PHY_CH1_EXT_MINCCA_PWR); 156 AR9280_PHY_CH1_EXT_MINCCA_PWR);
@@ -621,7 +630,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
621 u8 chainmask, rx_chain_status; 630 u8 chainmask, rx_chain_status;
622 631
623 rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK); 632 rx_chain_status = REG_READ(ah, AR_PHY_RX_CHAINMASK);
624 if (AR_SREV_9285(ah)) 633 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
625 chainmask = 0x9; 634 chainmask = 0x9;
626 else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) { 635 else if (AR_SREV_9280(ah) || AR_SREV_9287(ah)) {
627 if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4)) 636 if ((rx_chain_status & 0x2) || (rx_chain_status & 0x4))
@@ -715,7 +724,7 @@ void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah)
715 724
716 if (AR_SREV_9280(ah)) 725 if (AR_SREV_9280(ah))
717 noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE; 726 noise_floor = AR_PHY_CCA_MAX_AR9280_GOOD_VALUE;
718 else if (AR_SREV_9285(ah)) 727 else if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
719 noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE; 728 noise_floor = AR_PHY_CCA_MAX_AR9285_GOOD_VALUE;
720 else if (AR_SREV_9287(ah)) 729 else if (AR_SREV_9287(ah))
721 noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE; 730 noise_floor = AR_PHY_CCA_MAX_AR9287_GOOD_VALUE;
@@ -1051,9 +1060,12 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
1051 /* Do NF cal only at longer intervals */ 1060 /* Do NF cal only at longer intervals */
1052 if (longcal) { 1061 if (longcal) {
1053 /* Do periodic PAOffset Cal */ 1062 /* Do periodic PAOffset Cal */
1054 if (AR_SREV_9271(ah)) 1063 if (AR_SREV_9271(ah)) {
1055 ath9k_hw_9271_pa_cal(ah, false); 1064 if (!ah->pacal_info.skipcount)
1056 else if (AR_SREV_9285_11_OR_LATER(ah)) { 1065 ath9k_hw_9271_pa_cal(ah, false);
1066 else
1067 ah->pacal_info.skipcount--;
1068 } else if (AR_SREV_9285_11_OR_LATER(ah)) {
1057 if (!ah->pacal_info.skipcount) 1069 if (!ah->pacal_info.skipcount)
1058 ath9k_hw_9285_pa_cal(ah, false); 1070 ath9k_hw_9285_pa_cal(ah, false);
1059 else 1071 else
@@ -1080,7 +1092,7 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
1080EXPORT_SYMBOL(ath9k_hw_calibrate); 1092EXPORT_SYMBOL(ath9k_hw_calibrate);
1081 1093
1082/* Carrier leakage Calibration fix */ 1094/* Carrier leakage Calibration fix */
1083static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan) 1095static bool ar9285_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1084{ 1096{
1085 struct ath_common *common = ath9k_hw_common(ah); 1097 struct ath_common *common = ath9k_hw_common(ah);
1086 1098
@@ -1121,6 +1133,62 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1121 return true; 1133 return true;
1122} 1134}
1123 1135
1136static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1137{
1138 int i;
1139 u_int32_t txgain_max;
1140 u_int32_t clc_gain, gain_mask = 0, clc_num = 0;
1141 u_int32_t reg_clc_I0, reg_clc_Q0;
1142 u_int32_t i0_num = 0;
1143 u_int32_t q0_num = 0;
1144 u_int32_t total_num = 0;
1145 u_int32_t reg_rf2g5_org;
1146 bool retv = true;
1147
1148 if (!(ar9285_cl_cal(ah, chan)))
1149 return false;
1150
1151 txgain_max = MS(REG_READ(ah, AR_PHY_TX_PWRCTRL7),
1152 AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);
1153
1154 for (i = 0; i < (txgain_max+1); i++) {
1155 clc_gain = (REG_READ(ah, (AR_PHY_TX_GAIN_TBL1+(i<<2))) &
1156 AR_PHY_TX_GAIN_CLC) >> AR_PHY_TX_GAIN_CLC_S;
1157 if (!(gain_mask & (1 << clc_gain))) {
1158 gain_mask |= (1 << clc_gain);
1159 clc_num++;
1160 }
1161 }
1162
1163 for (i = 0; i < clc_num; i++) {
1164 reg_clc_I0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
1165 & AR_PHY_CLC_I0) >> AR_PHY_CLC_I0_S;
1166 reg_clc_Q0 = (REG_READ(ah, (AR_PHY_CLC_TBL1 + (i << 2)))
1167 & AR_PHY_CLC_Q0) >> AR_PHY_CLC_Q0_S;
1168 if (reg_clc_I0 == 0)
1169 i0_num++;
1170
1171 if (reg_clc_Q0 == 0)
1172 q0_num++;
1173 }
1174 total_num = i0_num + q0_num;
1175 if (total_num > AR9285_CLCAL_REDO_THRESH) {
1176 reg_rf2g5_org = REG_READ(ah, AR9285_RF2G5);
1177 if (AR_SREV_9285E_20(ah)) {
1178 REG_WRITE(ah, AR9285_RF2G5,
1179 (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
1180 AR9285_RF2G5_IC50TX_XE_SET);
1181 } else {
1182 REG_WRITE(ah, AR9285_RF2G5,
1183 (reg_rf2g5_org & AR9285_RF2G5_IC50TX) |
1184 AR9285_RF2G5_IC50TX_SET);
1185 }
1186 retv = ar9285_cl_cal(ah, chan);
1187 REG_WRITE(ah, AR9285_RF2G5, reg_rf2g5_org);
1188 }
1189 return retv;
1190}
1191
1124bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) 1192bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1125{ 1193{
1126 struct ath_common *common = ath9k_hw_common(ah); 1194 struct ath_common *common = ath9k_hw_common(ah);
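The noise-floor fields read in ath9k_hw_do_getnf are 9-bit two's-complement values, converted with nf = 0 - ((nf ^ 0x1ff) + 1); the AR9271 hunks above then force anything at or above -114 down to -116. A standalone sketch of both steps, with an example raw register value:

#include <stdio.h>

static int nf_sign_extend(int nf)
{
    if (nf & 0x100)                     /* sign bit of the 9-bit field */
        nf = 0 - ((nf ^ 0x1ff) + 1);
    return nf;
}

int main(void)
{
    int nf = nf_sign_extend(0x19c);     /* example raw 9-bit value */

    printf("nf=%d dBm\n", nf);          /* -100 */
    if (nf >= -114)                     /* AR9271 workaround above */
        nf = -116;
    printf("clamped nf=%d dBm\n", nf);  /* -116 */
    return 0;
}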
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 4d775ae141db..09effdedc8c0 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -255,7 +255,8 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
255 255
256 keyix = rx_stats->rs_keyix; 256 keyix = rx_stats->rs_keyix;
257 257
258 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error) { 258 if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
259 ieee80211_has_protected(fc)) {
259 rxs->flag |= RX_FLAG_DECRYPTED; 260 rxs->flag |= RX_FLAG_DECRYPTED;
260 } else if (ieee80211_has_protected(fc) 261 } else if (ieee80211_has_protected(fc)
261 && !decrypt_error && skb->len >= hdrlen + 4) { 262 && !decrypt_error && skb->len >= hdrlen + 4) {
@@ -286,6 +287,345 @@ int ath9k_cmn_padpos(__le16 frame_control)
286} 287}
287EXPORT_SYMBOL(ath9k_cmn_padpos); 288EXPORT_SYMBOL(ath9k_cmn_padpos);
288 289
290int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
291{
292 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
293
294 if (tx_info->control.hw_key) {
295 if (tx_info->control.hw_key->alg == ALG_WEP)
296 return ATH9K_KEY_TYPE_WEP;
297 else if (tx_info->control.hw_key->alg == ALG_TKIP)
298 return ATH9K_KEY_TYPE_TKIP;
299 else if (tx_info->control.hw_key->alg == ALG_CCMP)
300 return ATH9K_KEY_TYPE_AES;
301 }
302
303 return ATH9K_KEY_TYPE_CLEAR;
304}
305EXPORT_SYMBOL(ath9k_cmn_get_hw_crypto_keytype);
306
307static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
308 enum nl80211_channel_type channel_type)
309{
310 u32 chanmode = 0;
311
312 switch (chan->band) {
313 case IEEE80211_BAND_2GHZ:
314 switch (channel_type) {
315 case NL80211_CHAN_NO_HT:
316 case NL80211_CHAN_HT20:
317 chanmode = CHANNEL_G_HT20;
318 break;
319 case NL80211_CHAN_HT40PLUS:
320 chanmode = CHANNEL_G_HT40PLUS;
321 break;
322 case NL80211_CHAN_HT40MINUS:
323 chanmode = CHANNEL_G_HT40MINUS;
324 break;
325 }
326 break;
327 case IEEE80211_BAND_5GHZ:
328 switch (channel_type) {
329 case NL80211_CHAN_NO_HT:
330 case NL80211_CHAN_HT20:
331 chanmode = CHANNEL_A_HT20;
332 break;
333 case NL80211_CHAN_HT40PLUS:
334 chanmode = CHANNEL_A_HT40PLUS;
335 break;
336 case NL80211_CHAN_HT40MINUS:
337 chanmode = CHANNEL_A_HT40MINUS;
338 break;
339 }
340 break;
341 default:
342 break;
343 }
344
345 return chanmode;
346}
347
348/*
349 * Update internal channel flags.
350 */
351void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
352 struct ath9k_channel *ichan)
353{
354 struct ieee80211_channel *chan = hw->conf.channel;
355 struct ieee80211_conf *conf = &hw->conf;
356
357 ichan->channel = chan->center_freq;
358 ichan->chan = chan;
359
360 if (chan->band == IEEE80211_BAND_2GHZ) {
361 ichan->chanmode = CHANNEL_G;
362 ichan->channelFlags = CHANNEL_2GHZ | CHANNEL_OFDM | CHANNEL_G;
363 } else {
364 ichan->chanmode = CHANNEL_A;
365 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
366 }
367
368 if (conf_is_ht(conf))
369 ichan->chanmode = ath9k_get_extchanmode(chan,
370 conf->channel_type);
371}
372EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
373
374/*
375 * Get the internal channel reference.
376 */
377struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
378 struct ath_hw *ah)
379{
380 struct ieee80211_channel *curchan = hw->conf.channel;
381 struct ath9k_channel *channel;
382 u8 chan_idx;
383
384 chan_idx = curchan->hw_value;
385 channel = &ah->channels[chan_idx];
386 ath9k_cmn_update_ichannel(hw, channel);
387
388 return channel;
389}
390EXPORT_SYMBOL(ath9k_cmn_get_curchannel);
391
392static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
393 struct ath9k_keyval *hk, const u8 *addr,
394 bool authenticator)
395{
396 struct ath_hw *ah = common->ah;
397 const u8 *key_rxmic;
398 const u8 *key_txmic;
399
400 key_txmic = key + NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY;
401 key_rxmic = key + NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY;
402
403 if (addr == NULL) {
404 /*
405 * Group key installation - only two key cache entries are used
406 * regardless of splitmic capability since group key is only
407 * used either for TX or RX.
408 */
409 if (authenticator) {
410 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
411 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_mic));
412 } else {
413 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
414 memcpy(hk->kv_txmic, key_rxmic, sizeof(hk->kv_mic));
415 }
416 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
417 }
418 if (!common->splitmic) {
419 /* TX and RX keys share the same key cache entry. */
420 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
421 memcpy(hk->kv_txmic, key_txmic, sizeof(hk->kv_txmic));
422 return ath9k_hw_set_keycache_entry(ah, keyix, hk, addr);
423 }
424
425 /* Separate key cache entries for TX and RX */
426
427 /* TX key goes at first index, RX key at +32. */
428 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
429 if (!ath9k_hw_set_keycache_entry(ah, keyix, hk, NULL)) {
430 /* TX MIC entry failed. No need to proceed further */
431 ath_print(common, ATH_DBG_FATAL,
432 "Setting TX MIC Key Failed\n");
433 return 0;
434 }
435
436 memcpy(hk->kv_mic, key_rxmic, sizeof(hk->kv_mic));
437 /* XXX delete tx key on failure? */
438 return ath9k_hw_set_keycache_entry(ah, keyix + 32, hk, addr);
439}
440
441static int ath_reserve_key_cache_slot_tkip(struct ath_common *common)
442{
443 int i;
444
445 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
446 if (test_bit(i, common->keymap) ||
447 test_bit(i + 64, common->keymap))
448 continue; /* At least one part of TKIP key allocated */
449 if (common->splitmic &&
450 (test_bit(i + 32, common->keymap) ||
451 test_bit(i + 64 + 32, common->keymap)))
452 continue; /* At least one part of TKIP key allocated */
453
454 /* Found a free slot for a TKIP key */
455 return i;
456 }
457 return -1;
458}
459
460static int ath_reserve_key_cache_slot(struct ath_common *common)
461{
462 int i;
463
464 /* First, try to find slots that would not be available for TKIP. */
465 if (common->splitmic) {
466 for (i = IEEE80211_WEP_NKID; i < common->keymax / 4; i++) {
467 if (!test_bit(i, common->keymap) &&
468 (test_bit(i + 32, common->keymap) ||
469 test_bit(i + 64, common->keymap) ||
470 test_bit(i + 64 + 32, common->keymap)))
471 return i;
472 if (!test_bit(i + 32, common->keymap) &&
473 (test_bit(i, common->keymap) ||
474 test_bit(i + 64, common->keymap) ||
475 test_bit(i + 64 + 32, common->keymap)))
476 return i + 32;
477 if (!test_bit(i + 64, common->keymap) &&
478 (test_bit(i, common->keymap) ||
479 test_bit(i + 32, common->keymap) ||
480 test_bit(i + 64 + 32, common->keymap)))
481 return i + 64;
482 if (!test_bit(i + 64 + 32, common->keymap) &&
483 (test_bit(i, common->keymap) ||
484 test_bit(i + 32, common->keymap) ||
485 test_bit(i + 64, common->keymap)))
486 return i + 64 + 32;
487 }
488 } else {
489 for (i = IEEE80211_WEP_NKID; i < common->keymax / 2; i++) {
490 if (!test_bit(i, common->keymap) &&
491 test_bit(i + 64, common->keymap))
492 return i;
493 if (test_bit(i, common->keymap) &&
494 !test_bit(i + 64, common->keymap))
495 return i + 64;
496 }
497 }
498
499 /* No partially used TKIP slots, pick any available slot */
500 for (i = IEEE80211_WEP_NKID; i < common->keymax; i++) {
501 /* Do not allow slots that could be needed for TKIP group keys
502 * to be used. This limitation could be removed if we know that
503 * TKIP will not be used. */
504 if (i >= 64 && i < 64 + IEEE80211_WEP_NKID)
505 continue;
506 if (common->splitmic) {
507 if (i >= 32 && i < 32 + IEEE80211_WEP_NKID)
508 continue;
509 if (i >= 64 + 32 && i < 64 + 32 + IEEE80211_WEP_NKID)
510 continue;
511 }
512
513 if (!test_bit(i, common->keymap))
514 return i; /* Found a free slot for a key */
515 }
516
517 /* No free slot found */
518 return -1;
519}
520
521/*
522 * Configure encryption in the HW.
523 */
524int ath9k_cmn_key_config(struct ath_common *common,
525 struct ieee80211_vif *vif,
526 struct ieee80211_sta *sta,
527 struct ieee80211_key_conf *key)
528{
529 struct ath_hw *ah = common->ah;
530 struct ath9k_keyval hk;
531 const u8 *mac = NULL;
532 int ret = 0;
533 int idx;
534
535 memset(&hk, 0, sizeof(hk));
536
537 switch (key->alg) {
538 case ALG_WEP:
539 hk.kv_type = ATH9K_CIPHER_WEP;
540 break;
541 case ALG_TKIP:
542 hk.kv_type = ATH9K_CIPHER_TKIP;
543 break;
544 case ALG_CCMP:
545 hk.kv_type = ATH9K_CIPHER_AES_CCM;
546 break;
547 default:
548 return -EOPNOTSUPP;
549 }
550
551 hk.kv_len = key->keylen;
552 memcpy(hk.kv_val, key->key, key->keylen);
553
554 if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
555 /* For now, use the default keys for broadcast keys. This may
556 * need to change with virtual interfaces. */
557 idx = key->keyidx;
558 } else if (key->keyidx) {
559 if (WARN_ON(!sta))
560 return -EOPNOTSUPP;
561 mac = sta->addr;
562
563 if (vif->type != NL80211_IFTYPE_AP) {
564 /* Only keyidx 0 should be used with unicast key, but
565 * allow this for client mode for now. */
566 idx = key->keyidx;
567 } else
568 return -EIO;
569 } else {
570 if (WARN_ON(!sta))
571 return -EOPNOTSUPP;
572 mac = sta->addr;
573
574 if (key->alg == ALG_TKIP)
575 idx = ath_reserve_key_cache_slot_tkip(common);
576 else
577 idx = ath_reserve_key_cache_slot(common);
578 if (idx < 0)
579 return -ENOSPC; /* no free key cache entries */
580 }
581
582 if (key->alg == ALG_TKIP)
583 ret = ath_setkey_tkip(common, idx, key->key, &hk, mac,
584 vif->type == NL80211_IFTYPE_AP);
585 else
586 ret = ath9k_hw_set_keycache_entry(ah, idx, &hk, mac);
587
588 if (!ret)
589 return -EIO;
590
591 set_bit(idx, common->keymap);
592 if (key->alg == ALG_TKIP) {
593 set_bit(idx + 64, common->keymap);
594 if (common->splitmic) {
595 set_bit(idx + 32, common->keymap);
596 set_bit(idx + 64 + 32, common->keymap);
597 }
598 }
599
600 return idx;
601}
602EXPORT_SYMBOL(ath9k_cmn_key_config);
603
604/*
605 * Delete Key.
606 */
607void ath9k_cmn_key_delete(struct ath_common *common,
608 struct ieee80211_key_conf *key)
609{
610 struct ath_hw *ah = common->ah;
611
612 ath9k_hw_keyreset(ah, key->hw_key_idx);
613 if (key->hw_key_idx < IEEE80211_WEP_NKID)
614 return;
615
616 clear_bit(key->hw_key_idx, common->keymap);
617 if (key->alg != ALG_TKIP)
618 return;
619
620 clear_bit(key->hw_key_idx + 64, common->keymap);
621 if (common->splitmic) {
622 ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
623 clear_bit(key->hw_key_idx + 32, common->keymap);
624 clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
625 }
626}
627EXPORT_SYMBOL(ath9k_cmn_key_delete);
628
289static int __init ath9k_cmn_init(void) 629static int __init ath9k_cmn_init(void)
290{ 630{
291 return 0; 631 return 0;
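ath9k_cmn_key_config above marks up to four keymap bits for one TKIP key when splitmic is set: the data key at idx, the RX MIC at idx + 64, and the split TX MIC pair at idx + 32 and idx + 64 + 32. A plain-C visualisation of that layout; the 128-slot array and the first non-WEP index (IEEE80211_WEP_NKID = 4) follow the constants used in the file, but this is not driver code:

#include <stdio.h>

int main(void)
{
    unsigned char keymap[128] = { 0 };
    int idx = 4;                    /* first slot past the 4 WEP default keys */

    keymap[idx] = 1;                /* TKIP data key */
    keymap[idx + 64] = 1;           /* RX MIC */
    keymap[idx + 32] = 1;           /* TX MIC (splitmic only) */
    keymap[idx + 64 + 32] = 1;      /* TX MIC pair (splitmic only) */

    for (int i = 0; i < 128; i++)
        if (keymap[i])
            printf("slot %d in use\n", i);  /* 4, 36, 68, 100 */
    return 0;
}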
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index 042999c2fe9c..72a835d9e97f 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -23,6 +23,8 @@
23 23
24/* Common header for Atheros 802.11n base driver cores */ 24/* Common header for Atheros 802.11n base driver cores */
25 25
26#define IEEE80211_WEP_NKID 4
27
26#define WME_NUM_TID 16 28#define WME_NUM_TID 16
27#define WME_BA_BMP_SIZE 64 29#define WME_BA_BMP_SIZE 64
28#define WME_MAX_BA WME_BA_BMP_SIZE 30#define WME_MAX_BA WME_BA_BMP_SIZE
@@ -125,3 +127,14 @@ void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
125 bool decrypt_error); 127 bool decrypt_error);
126 128
127int ath9k_cmn_padpos(__le16 frame_control); 129int ath9k_cmn_padpos(__le16 frame_control);
130int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
131void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
132 struct ath9k_channel *ichan);
133struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
134 struct ath_hw *ah);
135int ath9k_cmn_key_config(struct ath_common *common,
136 struct ieee80211_vif *vif,
137 struct ieee80211_sta *sta,
138 struct ieee80211_key_conf *key);
139void ath9k_cmn_key_delete(struct ath_common *common,
140 struct ieee80211_key_conf *key);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 081e0085ed4c..9a8e419398f9 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -157,10 +157,10 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
157 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n", 157 "txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
158 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17); 158 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
159 159
160 len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n", 160 len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x\n",
161 REG_READ_D(ah, AR_OBS_BUS_1)); 161 REG_READ_D(ah, AR_OBS_BUS_1));
162 len += snprintf(buf + len, DMA_BUF_LEN - len, 162 len += snprintf(buf + len, DMA_BUF_LEN - len,
163 "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR)); 163 "AR_CR: 0x%x\n", REG_READ_D(ah, AR_CR));
164 164
165 ath9k_ps_restore(sc); 165 ath9k_ps_restore(sc);
166 166
@@ -557,10 +557,8 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
557} 557}
558 558
559void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 559void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
560 struct ath_buf *bf) 560 struct ath_buf *bf, struct ath_tx_status *ts)
561{ 561{
562 struct ath_desc *ds = bf->bf_desc;
563
564 if (bf_isampdu(bf)) { 562 if (bf_isampdu(bf)) {
565 if (bf_isxretried(bf)) 563 if (bf_isxretried(bf))
566 TX_STAT_INC(txq->axq_qnum, a_xretries); 564 TX_STAT_INC(txq->axq_qnum, a_xretries);
@@ -570,17 +568,17 @@ void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
 		TX_STAT_INC(txq->axq_qnum, completed);
 	}
 
-	if (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO)
+	if (ts->ts_status & ATH9K_TXERR_FIFO)
 		TX_STAT_INC(txq->axq_qnum, fifo_underrun);
-	if (ds->ds_txstat.ts_status & ATH9K_TXERR_XTXOP)
+	if (ts->ts_status & ATH9K_TXERR_XTXOP)
 		TX_STAT_INC(txq->axq_qnum, xtxop);
-	if (ds->ds_txstat.ts_status & ATH9K_TXERR_TIMER_EXPIRED)
+	if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
 		TX_STAT_INC(txq->axq_qnum, timer_exp);
-	if (ds->ds_txstat.ts_flags & ATH9K_TX_DESC_CFG_ERR)
+	if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
 		TX_STAT_INC(txq->axq_qnum, desc_cfg_err);
-	if (ds->ds_txstat.ts_flags & ATH9K_TX_DATA_UNDERRUN)
+	if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
 		TX_STAT_INC(txq->axq_qnum, data_underrun);
-	if (ds->ds_txstat.ts_flags & ATH9K_TX_DELIM_UNDERRUN)
+	if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
 		TX_STAT_INC(txq->axq_qnum, delim_underrun);
 }
 
@@ -663,30 +661,29 @@ static ssize_t read_file_recv(struct file *file, char __user *user_buf,
 #undef PHY_ERR
 }
 
-void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs)
 {
 #define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
 #define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
 
-	struct ath_desc *ds = bf->bf_desc;
 	u32 phyerr;
 
-	if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+	if (rs->rs_status & ATH9K_RXERR_CRC)
 		RX_STAT_INC(crc_err);
-	if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
+	if (rs->rs_status & ATH9K_RXERR_DECRYPT)
 		RX_STAT_INC(decrypt_crc_err);
-	if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
+	if (rs->rs_status & ATH9K_RXERR_MIC)
 		RX_STAT_INC(mic_err);
-	if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
+	if (rs->rs_status & ATH9K_RX_DELIM_CRC_PRE)
 		RX_STAT_INC(pre_delim_crc_err);
-	if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
+	if (rs->rs_status & ATH9K_RX_DELIM_CRC_POST)
 		RX_STAT_INC(post_delim_crc_err);
-	if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
+	if (rs->rs_status & ATH9K_RX_DECRYPT_BUSY)
 		RX_STAT_INC(decrypt_busy_err);
 
-	if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+	if (rs->rs_status & ATH9K_RXERR_PHY) {
 		RX_STAT_INC(phy_err);
-		phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
+		phyerr = rs->rs_phyerr & 0x24;
 		RX_PHY_ERR_INC(phyerr);
 	}
 
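
The point of the two hunks above: the debug helpers used to dig the status words out of the DMA descriptor (bf->bf_desc->ds_txstat / ds_rxstat), tying the statistics code to the descriptor layout. They now take the parsed status structs directly, so any caller that has already run the descriptor parser can hand over its result. Sketched calling convention (the local variable is illustrative):

	struct ath_rx_status rs;	/* filled in by the descriptor parser */

	/* old: ath_debug_stat_rx(sc, bf);   -- reached into bf->bf_desc */
	ath_debug_stat_rx(sc, &rs);	/* new: status is passed explicitly */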
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 86780e68b31e..b2af9de755e6 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -167,8 +167,8 @@ void ath9k_debug_remove_root(void);
 void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
 void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
 void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
-		       struct ath_buf *bf);
-void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
+		       struct ath_buf *bf, struct ath_tx_status *ts);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
 void ath_debug_stat_retries(struct ath_softc *sc, int rix,
 			    int xretries, int retries, u8 per);
 
174 174
@@ -204,12 +204,13 @@ static inline void ath_debug_stat_rc(struct ath_softc *sc,
 
 static inline void ath_debug_stat_tx(struct ath_softc *sc,
 				     struct ath_txq *txq,
-				     struct ath_buf *bf)
+				     struct ath_buf *bf,
+				     struct ath_tx_status *ts)
 {
 }
 
 static inline void ath_debug_stat_rx(struct ath_softc *sc,
-				     struct ath_buf *bf)
+				     struct ath_rx_status *rs)
 {
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 68db16690abf..0354fe50f8e0 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -43,7 +43,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
 	for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
 		if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
 			ath_print(common, ATH_DBG_EEPROM,
-				  "Unable to read eeprom region \n");
+				  "Unable to read eeprom region\n");
 			return false;
 		}
 		eep_data++;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 839d05a1df29..d8ca94c3fa0c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -44,7 +44,7 @@ static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
 		if (!ath9k_hw_nvram_read(common,
 					 addr + eep_start_loc, eep_data)) {
 			ath_print(common, ATH_DBG_EEPROM,
-				  "Unable to read eeprom region \n");
+				  "Unable to read eeprom region\n");
 			return false;
 		}
 		eep_data++;
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index deab8beb0680..0ee75e79fe35 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -283,22 +283,17 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
 				  u32 timer_next,
 				  u32 timer_period)
 {
-	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath_softc *sc = (struct ath_softc *) common->priv;
-
 	ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
 
-	if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+	if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
 		ath9k_hw_set_interrupts(ah, 0);
-		sc->imask |= ATH9K_INT_GENTIMER;
-		ath9k_hw_set_interrupts(ah, sc->imask);
+		ah->imask |= ATH9K_INT_GENTIMER;
+		ath9k_hw_set_interrupts(ah, ah->imask);
 	}
 }
 
 static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
 {
-	struct ath_common *common = ath9k_hw_common(ah);
-	struct ath_softc *sc = (struct ath_softc *) common->priv;
 	struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
 
 	ath9k_hw_gen_timer_stop(ah, timer);
@@ -306,8 +301,8 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
 	/* if no timer is enabled, turn off interrupt mask */
 	if (timer_table->timer_mask.val == 0) {
 		ath9k_hw_set_interrupts(ah, 0);
-		sc->imask &= ~ATH9K_INT_GENTIMER;
-		ath9k_hw_set_interrupts(ah, sc->imask);
+		ah->imask &= ~ATH9K_INT_GENTIMER;
+		ath9k_hw_set_interrupts(ah, ah->imask);
 	}
 }
 
@@ -364,7 +359,7 @@ static void ath_btcoex_no_stomp_timer(void *arg)
 	bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
 
 	ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
-		  "no stomp timer running \n");
+		  "no stomp timer running\n");
 
 	spin_lock_bh(&btcoex->btcoex_lock);
 
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
new file mode 100644
index 000000000000..fe994e229898
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -0,0 +1,984 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19#define ATH9K_FW_USB_DEV(devid, fw) \
20 { USB_DEVICE(0x0cf3, devid), .driver_info = (unsigned long) fw }
21
22static struct usb_device_id ath9k_hif_usb_ids[] = {
23 ATH9K_FW_USB_DEV(0x9271, "ar9271.fw"),
24 ATH9K_FW_USB_DEV(0x1006, "ar9271.fw"),
25 { },
26};
27
28MODULE_DEVICE_TABLE(usb, ath9k_hif_usb_ids);
29
30static int __hif_usb_tx(struct hif_device_usb *hif_dev);
31
32static void hif_usb_regout_cb(struct urb *urb)
33{
34 struct cmd_buf *cmd = (struct cmd_buf *)urb->context;
35
36 switch (urb->status) {
37 case 0:
38 break;
39 case -ENOENT:
40 case -ECONNRESET:
41 case -ENODEV:
42 case -ESHUTDOWN:
43 goto free;
44 default:
45 break;
46 }
47
48 if (cmd) {
49 ath9k_htc_txcompletion_cb(cmd->hif_dev->htc_handle,
50 cmd->skb, 1);
51 kfree(cmd);
52 }
53
54 return;
55free:
56 kfree_skb(cmd->skb);
57 kfree(cmd);
58}
59
60static int hif_usb_send_regout(struct hif_device_usb *hif_dev,
61 struct sk_buff *skb)
62{
63 struct urb *urb;
64 struct cmd_buf *cmd;
65 int ret = 0;
66
67 urb = usb_alloc_urb(0, GFP_KERNEL);
68 if (urb == NULL)
69 return -ENOMEM;
70
71 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
72 if (cmd == NULL) {
73 usb_free_urb(urb);
74 return -ENOMEM;
75 }
76
77 cmd->skb = skb;
78 cmd->hif_dev = hif_dev;
79
80 usb_fill_int_urb(urb, hif_dev->udev,
81 usb_sndintpipe(hif_dev->udev, USB_REG_OUT_PIPE),
82 skb->data, skb->len,
83 hif_usb_regout_cb, cmd, 1);
84
85 usb_anchor_urb(urb, &hif_dev->regout_submitted);
86 ret = usb_submit_urb(urb, GFP_KERNEL);
87 if (ret) {
88 usb_unanchor_urb(urb);
89 kfree(cmd);
90 }
91 usb_free_urb(urb);
92
93 return ret;
94}
95
96static void hif_usb_tx_cb(struct urb *urb)
97{
98 struct tx_buf *tx_buf = (struct tx_buf *) urb->context;
99 struct hif_device_usb *hif_dev = tx_buf->hif_dev;
100 struct sk_buff *skb;
101 bool drop, flush;
102
103 if (!hif_dev)
104 return;
105
106 switch (urb->status) {
107 case 0:
108 break;
109 case -ENOENT:
110 case -ECONNRESET:
111 break;
112 case -ENODEV:
113 case -ESHUTDOWN:
114 return;
115 default:
116 break;
117 }
118
119 if (tx_buf) {
120 spin_lock(&hif_dev->tx.tx_lock);
121 drop = !!(hif_dev->tx.flags & HIF_USB_TX_STOP);
122 flush = !!(hif_dev->tx.flags & HIF_USB_TX_FLUSH);
123 spin_unlock(&hif_dev->tx.tx_lock);
124
125 while ((skb = __skb_dequeue(&tx_buf->skb_queue)) != NULL) {
126 if (!drop && !flush) {
127 ath9k_htc_txcompletion_cb(hif_dev->htc_handle,
128 skb, 1);
129 TX_STAT_INC(skb_completed);
130 } else {
131 dev_kfree_skb_any(skb);
132 }
133 }
134
135 if (flush)
136 return;
137
138 tx_buf->len = tx_buf->offset = 0;
139 __skb_queue_head_init(&tx_buf->skb_queue);
140
141 spin_lock(&hif_dev->tx.tx_lock);
142 list_del(&tx_buf->list);
143 list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
144 hif_dev->tx.tx_buf_cnt++;
145 if (!drop)
146 __hif_usb_tx(hif_dev); /* Check for pending SKBs */
147 TX_STAT_INC(buf_completed);
148 spin_unlock(&hif_dev->tx.tx_lock);
149 }
150}
151
152static inline void ath9k_skb_queue_purge(struct sk_buff_head *list)
153{
154 struct sk_buff *skb;
155 while ((skb = __skb_dequeue(list)) != NULL)
156 dev_kfree_skb_any(skb);
157}
158
159/* TX lock has to be taken */
160static int __hif_usb_tx(struct hif_device_usb *hif_dev)
161{
162 struct tx_buf *tx_buf = NULL;
163 struct sk_buff *nskb = NULL;
164 int ret = 0, i;
165 u16 *hdr, tx_skb_cnt = 0;
166 u8 *buf;
167
168 if (hif_dev->tx.tx_skb_cnt == 0)
169 return 0;
170
171 /* Check if a free TX buffer is available */
172 if (list_empty(&hif_dev->tx.tx_buf))
173 return 0;
174
175 tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list);
176 list_del(&tx_buf->list);
177 list_add_tail(&tx_buf->list, &hif_dev->tx.tx_pending);
178 hif_dev->tx.tx_buf_cnt--;
179
180 tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM);
181
182 for (i = 0; i < tx_skb_cnt; i++) {
183 nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue);
184
185 /* Should never be NULL */
186 BUG_ON(!nskb);
187
188 hif_dev->tx.tx_skb_cnt--;
189
190 buf = tx_buf->buf;
191 buf += tx_buf->offset;
192 hdr = (u16 *)buf;
193 *hdr++ = nskb->len;
194 *hdr++ = ATH_USB_TX_STREAM_MODE_TAG;
195 buf += 4;
196 memcpy(buf, nskb->data, nskb->len);
197 tx_buf->len = nskb->len + 4;
198
199 if (i < (tx_skb_cnt - 1))
200 tx_buf->offset += (((tx_buf->len - 1) / 4) + 1) * 4;
201
202 if (i == (tx_skb_cnt - 1))
203 tx_buf->len += tx_buf->offset;
204
205 __skb_queue_tail(&tx_buf->skb_queue, nskb);
206 TX_STAT_INC(skb_queued);
207 }
208
209 usb_fill_bulk_urb(tx_buf->urb, hif_dev->udev,
210 usb_sndbulkpipe(hif_dev->udev, USB_WLAN_TX_PIPE),
211 tx_buf->buf, tx_buf->len,
212 hif_usb_tx_cb, tx_buf);
213
214 ret = usb_submit_urb(tx_buf->urb, GFP_ATOMIC);
215 if (ret) {
216 tx_buf->len = tx_buf->offset = 0;
217 ath9k_skb_queue_purge(&tx_buf->skb_queue);
218 __skb_queue_head_init(&tx_buf->skb_queue);
219 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
220 hif_dev->tx.tx_buf_cnt++;
221 }
222
223 if (!ret)
224 TX_STAT_INC(buf_queued);
225
226 return ret;
227}
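
__hif_usb_tx() above packs up to MAX_TX_AGGR_NUM queued frames into a single bulk URB. Each frame gets a 4-byte header (little-endian u16 length, then the u16 stream tag 0x697e), and each following frame starts at the next 4-byte boundary, which is what the (((len - 1) / 4) + 1) * 4 expression computes. A standalone user-space model of that offset arithmetic (the frame lengths are made up):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t payload[] = { 1536, 97, 40 };
		uint32_t offset = 0;

		for (int i = 0; i < 3; i++) {
			uint32_t len = payload[i] + 4;	/* header + payload */
			printf("frame %d at offset %u, %u bytes\n", i, offset, len);
			offset += (((len - 1) / 4) + 1) * 4;	/* round up to 4 */
		}
		return 0;
	}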
228
229static int hif_usb_send_tx(struct hif_device_usb *hif_dev, struct sk_buff *skb,
230 struct ath9k_htc_tx_ctl *tx_ctl)
231{
232 unsigned long flags;
233
234 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
235
236 if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
237 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
238 return -ENODEV;
239 }
240
241 /* Check if the max queue count has been reached */
242 if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) {
243 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
244 return -ENOMEM;
245 }
246
247 __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb);
248 hif_dev->tx.tx_skb_cnt++;
249
250 /* Send normal frames immediately */
251	if (!tx_ctl || tx_ctl->type == ATH9K_HTC_NORMAL)
252 __hif_usb_tx(hif_dev);
253
254 /* Check if AMPDUs have to be sent immediately */
255 if (tx_ctl && (tx_ctl->type == ATH9K_HTC_AMPDU) &&
256 (hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) &&
257 (hif_dev->tx.tx_skb_cnt < 2)) {
258 __hif_usb_tx(hif_dev);
259 }
260
261 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
262
263 return 0;
264}
265
266static void hif_usb_start(void *hif_handle, u8 pipe_id)
267{
268 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
269 unsigned long flags;
270
271 hif_dev->flags |= HIF_USB_START;
272
273 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
274 hif_dev->tx.flags &= ~HIF_USB_TX_STOP;
275 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
276}
277
278static void hif_usb_stop(void *hif_handle, u8 pipe_id)
279{
280 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
281 unsigned long flags;
282
283 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
284 ath9k_skb_queue_purge(&hif_dev->tx.tx_skb_queue);
285 hif_dev->tx.tx_skb_cnt = 0;
286 hif_dev->tx.flags |= HIF_USB_TX_STOP;
287 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
288}
289
290static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
291 struct ath9k_htc_tx_ctl *tx_ctl)
292{
293 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
294 int ret = 0;
295
296 switch (pipe_id) {
297 case USB_WLAN_TX_PIPE:
298 ret = hif_usb_send_tx(hif_dev, skb, tx_ctl);
299 break;
300 case USB_REG_OUT_PIPE:
301 ret = hif_usb_send_regout(hif_dev, skb);
302 break;
303 default:
304 dev_err(&hif_dev->udev->dev,
305 "ath9k_htc: Invalid TX pipe: %d\n", pipe_id);
306 ret = -EINVAL;
307 break;
308 }
309
310 return ret;
311}
312
313static struct ath9k_htc_hif hif_usb = {
314 .transport = ATH9K_HIF_USB,
315 .name = "ath9k_hif_usb",
316
317 .control_ul_pipe = USB_REG_OUT_PIPE,
318 .control_dl_pipe = USB_REG_IN_PIPE,
319
320 .start = hif_usb_start,
321 .stop = hif_usb_stop,
322 .send = hif_usb_send,
323};
324
325static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
326 struct sk_buff *skb)
327{
328 struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
329 int index = 0, i = 0, chk_idx, len = skb->len;
330 int rx_remain_len = 0, rx_pkt_len = 0;
331 u16 pkt_len, pkt_tag, pool_index = 0;
332 u8 *ptr;
333
334 spin_lock(&hif_dev->rx_lock);
335
336 rx_remain_len = hif_dev->rx_remain_len;
337 rx_pkt_len = hif_dev->rx_transfer_len;
338
339 if (rx_remain_len != 0) {
340 struct sk_buff *remain_skb = hif_dev->remain_skb;
341
342 if (remain_skb) {
343 ptr = (u8 *) remain_skb->data;
344
345 index = rx_remain_len;
346 rx_remain_len -= hif_dev->rx_pad_len;
347 ptr += rx_pkt_len;
348
349 memcpy(ptr, skb->data, rx_remain_len);
350
351 rx_pkt_len += rx_remain_len;
352 hif_dev->rx_remain_len = 0;
353 skb_put(remain_skb, rx_pkt_len);
354
355 skb_pool[pool_index++] = remain_skb;
356
357 } else {
358 index = rx_remain_len;
359 }
360 }
361
362 spin_unlock(&hif_dev->rx_lock);
363
364 while (index < len) {
365 ptr = (u8 *) skb->data;
366
367 pkt_len = ptr[index] + (ptr[index+1] << 8);
368 pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
369
370 if (pkt_tag == ATH_USB_RX_STREAM_MODE_TAG) {
371 u16 pad_len;
372
373 pad_len = 4 - (pkt_len & 0x3);
374 if (pad_len == 4)
375 pad_len = 0;
376
377 chk_idx = index;
378 index = index + 4 + pkt_len + pad_len;
379
380 if (index > MAX_RX_BUF_SIZE) {
381 spin_lock(&hif_dev->rx_lock);
382 hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
383 hif_dev->rx_transfer_len =
384 MAX_RX_BUF_SIZE - chk_idx - 4;
385 hif_dev->rx_pad_len = pad_len;
386
387 nskb = __dev_alloc_skb(pkt_len + 32,
388 GFP_ATOMIC);
389 if (!nskb) {
390 dev_err(&hif_dev->udev->dev,
391 "ath9k_htc: RX memory allocation"
392 " error\n");
393 spin_unlock(&hif_dev->rx_lock);
394 goto err;
395 }
396 skb_reserve(nskb, 32);
397 RX_STAT_INC(skb_allocated);
398
399 memcpy(nskb->data, &(skb->data[chk_idx+4]),
400 hif_dev->rx_transfer_len);
401
402 /* Record the buffer pointer */
403 hif_dev->remain_skb = nskb;
404 spin_unlock(&hif_dev->rx_lock);
405 } else {
406 nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
407 if (!nskb) {
408 dev_err(&hif_dev->udev->dev,
409 "ath9k_htc: RX memory allocation"
410 " error\n");
411 goto err;
412 }
413 skb_reserve(nskb, 32);
414 RX_STAT_INC(skb_allocated);
415
416 memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
417 skb_put(nskb, pkt_len);
418 skb_pool[pool_index++] = nskb;
419 }
420 } else {
421 RX_STAT_INC(skb_dropped);
422 return;
423 }
424 }
425
426err:
427 for (i = 0; i < pool_index; i++) {
428 ath9k_htc_rx_msg(hif_dev->htc_handle, skb_pool[i],
429 skb_pool[i]->len, USB_WLAN_RX_PIPE);
430 RX_STAT_INC(skb_completed);
431 }
432}
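
The RX path undoes the same stream framing, with tag 0x4e00, and a packet may straddle two USB transfers; the remain_skb/rx_remain_len bookkeeping above carries the partial packet into the next callback. The pad arithmetic that keeps every header 4-byte aligned can be checked in isolation (standalone user-space C, illustrative lengths):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		for (uint16_t pkt_len = 60; pkt_len <= 64; pkt_len++) {
			uint16_t pad = 4 - (pkt_len & 0x3);
			if (pad == 4)
				pad = 0;
			printf("pkt_len=%u pad=%u next header at +%u\n",
			       pkt_len, pad, 4 + pkt_len + pad);
		}
		return 0;
	}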
433
434static void ath9k_hif_usb_rx_cb(struct urb *urb)
435{
436 struct sk_buff *skb = (struct sk_buff *) urb->context;
437 struct hif_device_usb *hif_dev = (struct hif_device_usb *)
438 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
439 int ret;
440
441 if (!skb)
442 return;
443
444 if (!hif_dev)
445 goto free;
446
447 switch (urb->status) {
448 case 0:
449 break;
450 case -ENOENT:
451 case -ECONNRESET:
452 case -ENODEV:
453 case -ESHUTDOWN:
454 goto free;
455 default:
456 goto resubmit;
457 }
458
459 if (likely(urb->actual_length != 0)) {
460 skb_put(skb, urb->actual_length);
461 ath9k_hif_usb_rx_stream(hif_dev, skb);
462 }
463
464resubmit:
465 skb_reset_tail_pointer(skb);
466 skb_trim(skb, 0);
467
468 usb_anchor_urb(urb, &hif_dev->rx_submitted);
469 ret = usb_submit_urb(urb, GFP_ATOMIC);
470 if (ret) {
471 usb_unanchor_urb(urb);
472 goto free;
473 }
474
475 return;
476free:
477 kfree_skb(skb);
478}
479
480static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
481{
482 struct sk_buff *skb = (struct sk_buff *) urb->context;
483 struct sk_buff *nskb;
484 struct hif_device_usb *hif_dev = (struct hif_device_usb *)
485 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
486 int ret;
487
488 if (!skb)
489 return;
490
491 if (!hif_dev)
492 goto free;
493
494 switch (urb->status) {
495 case 0:
496 break;
497 case -ENOENT:
498 case -ECONNRESET:
499 case -ENODEV:
500 case -ESHUTDOWN:
501 goto free;
502 default:
503 goto resubmit;
504 }
505
506 if (likely(urb->actual_length != 0)) {
507 skb_put(skb, urb->actual_length);
508
509 nskb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_ATOMIC);
510 if (!nskb)
511 goto resubmit;
512
513 usb_fill_int_urb(urb, hif_dev->udev,
514 usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
515 nskb->data, MAX_REG_IN_BUF_SIZE,
516 ath9k_hif_usb_reg_in_cb, nskb, 1);
517
518 ret = usb_submit_urb(urb, GFP_ATOMIC);
519 if (ret) {
520 kfree_skb(nskb);
521 goto free;
522 }
523
524 ath9k_htc_rx_msg(hif_dev->htc_handle, skb,
525 skb->len, USB_REG_IN_PIPE);
526
527 return;
528 }
529
530resubmit:
531 skb_reset_tail_pointer(skb);
532 skb_trim(skb, 0);
533
534 ret = usb_submit_urb(urb, GFP_ATOMIC);
535 if (ret)
536 goto free;
537
538 return;
539free:
540 kfree_skb(skb);
541 urb->context = NULL;
542}
543
544static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
545{
546 unsigned long flags;
547 struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
548
549 list_for_each_entry_safe(tx_buf, tx_buf_tmp, &hif_dev->tx.tx_buf, list) {
550 list_del(&tx_buf->list);
551 usb_free_urb(tx_buf->urb);
552 kfree(tx_buf->buf);
553 kfree(tx_buf);
554 }
555
556 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
557 hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
558 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
559
560 list_for_each_entry_safe(tx_buf, tx_buf_tmp,
561 &hif_dev->tx.tx_pending, list) {
562 usb_kill_urb(tx_buf->urb);
563 list_del(&tx_buf->list);
564 usb_free_urb(tx_buf->urb);
565 kfree(tx_buf->buf);
566 kfree(tx_buf);
567 }
568
569 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
570 hif_dev->tx.flags &= ~HIF_USB_TX_FLUSH;
571 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
572}
573
574static int ath9k_hif_usb_alloc_tx_urbs(struct hif_device_usb *hif_dev)
575{
576 struct tx_buf *tx_buf;
577 int i;
578
579 INIT_LIST_HEAD(&hif_dev->tx.tx_buf);
580 INIT_LIST_HEAD(&hif_dev->tx.tx_pending);
581 spin_lock_init(&hif_dev->tx.tx_lock);
582 __skb_queue_head_init(&hif_dev->tx.tx_skb_queue);
583
584 for (i = 0; i < MAX_TX_URB_NUM; i++) {
585 tx_buf = kzalloc(sizeof(struct tx_buf), GFP_KERNEL);
586 if (!tx_buf)
587 goto err;
588
589 tx_buf->buf = kzalloc(MAX_TX_BUF_SIZE, GFP_KERNEL);
590 if (!tx_buf->buf)
591 goto err;
592
593 tx_buf->urb = usb_alloc_urb(0, GFP_KERNEL);
594 if (!tx_buf->urb)
595 goto err;
596
597 tx_buf->hif_dev = hif_dev;
598 __skb_queue_head_init(&tx_buf->skb_queue);
599
600 list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf);
601 }
602
603 hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM;
604
605 return 0;
606err:
607 ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
608 return -ENOMEM;
609}
610
611static void ath9k_hif_usb_dealloc_rx_urbs(struct hif_device_usb *hif_dev)
612{
613 usb_kill_anchored_urbs(&hif_dev->rx_submitted);
614}
615
616static int ath9k_hif_usb_alloc_rx_urbs(struct hif_device_usb *hif_dev)
617{
618 struct urb *urb = NULL;
619 struct sk_buff *skb = NULL;
620 int i, ret;
621
622 init_usb_anchor(&hif_dev->rx_submitted);
623 spin_lock_init(&hif_dev->rx_lock);
624
625 for (i = 0; i < MAX_RX_URB_NUM; i++) {
626
627 /* Allocate URB */
628 urb = usb_alloc_urb(0, GFP_KERNEL);
629 if (urb == NULL) {
630 ret = -ENOMEM;
631 goto err_urb;
632 }
633
634 /* Allocate buffer */
635 skb = alloc_skb(MAX_RX_BUF_SIZE, GFP_KERNEL);
636 if (!skb) {
637 ret = -ENOMEM;
638 goto err_skb;
639 }
640
641 usb_fill_bulk_urb(urb, hif_dev->udev,
642 usb_rcvbulkpipe(hif_dev->udev,
643 USB_WLAN_RX_PIPE),
644 skb->data, MAX_RX_BUF_SIZE,
645 ath9k_hif_usb_rx_cb, skb);
646
647 /* Anchor URB */
648 usb_anchor_urb(urb, &hif_dev->rx_submitted);
649
650 /* Submit URB */
651 ret = usb_submit_urb(urb, GFP_KERNEL);
652 if (ret) {
653 usb_unanchor_urb(urb);
654 goto err_submit;
655 }
656
657 /*
658 * Drop reference count.
659 * This ensures that the URB is freed when killing them.
660 */
661 usb_free_urb(urb);
662 }
663
664 return 0;
665
666err_submit:
667 kfree_skb(skb);
668err_skb:
669 usb_free_urb(urb);
670err_urb:
671 ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
672 return ret;
673}
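
A note on the URB lifetime idiom used here: usb_anchor_urb() takes its own reference, so once the submit succeeds the allocation reference can be dropped immediately, and usb_kill_anchored_urbs() in the dealloc path then both cancels and ultimately frees each URB. The pattern in isolation (kernel context assumed, fields as in this file):

	usb_anchor_urb(urb, &hif_dev->rx_submitted);
	if (usb_submit_urb(urb, GFP_KERNEL) == 0)
		usb_free_urb(urb);	/* the anchor keeps the URB alive */
	else
		usb_unanchor_urb(urb);	/* error: back out the anchoring */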
674
675static void ath9k_hif_usb_dealloc_reg_in_urb(struct hif_device_usb *hif_dev)
676{
677 if (hif_dev->reg_in_urb) {
678 usb_kill_urb(hif_dev->reg_in_urb);
679 if (hif_dev->reg_in_urb->context)
680 kfree_skb((void *)hif_dev->reg_in_urb->context);
681 usb_free_urb(hif_dev->reg_in_urb);
682 hif_dev->reg_in_urb = NULL;
683 }
684}
685
686static int ath9k_hif_usb_alloc_reg_in_urb(struct hif_device_usb *hif_dev)
687{
688 struct sk_buff *skb;
689
690 hif_dev->reg_in_urb = usb_alloc_urb(0, GFP_KERNEL);
691 if (hif_dev->reg_in_urb == NULL)
692 return -ENOMEM;
693
694 skb = alloc_skb(MAX_REG_IN_BUF_SIZE, GFP_KERNEL);
695 if (!skb)
696 goto err;
697
698 usb_fill_int_urb(hif_dev->reg_in_urb, hif_dev->udev,
699 usb_rcvintpipe(hif_dev->udev, USB_REG_IN_PIPE),
700 skb->data, MAX_REG_IN_BUF_SIZE,
701 ath9k_hif_usb_reg_in_cb, skb, 1);
702
703 if (usb_submit_urb(hif_dev->reg_in_urb, GFP_KERNEL) != 0)
704 goto err;
705
706 return 0;
707
708err:
709 ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
710 return -ENOMEM;
711}
712
713static int ath9k_hif_usb_alloc_urbs(struct hif_device_usb *hif_dev)
714{
715 /* Register Write */
716 init_usb_anchor(&hif_dev->regout_submitted);
717
718 /* TX */
719 if (ath9k_hif_usb_alloc_tx_urbs(hif_dev) < 0)
720 goto err;
721
722 /* RX */
723 if (ath9k_hif_usb_alloc_rx_urbs(hif_dev) < 0)
724 goto err;
725
726 /* Register Read */
727 if (ath9k_hif_usb_alloc_reg_in_urb(hif_dev) < 0)
728 goto err;
729
730 return 0;
731err:
732 return -ENOMEM;
733}
734
735static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
736{
737 int transfer, err;
738 const void *data = hif_dev->firmware->data;
739 size_t len = hif_dev->firmware->size;
740 u32 addr = AR9271_FIRMWARE;
741 u8 *buf = kzalloc(4096, GFP_KERNEL);
742
743 if (!buf)
744 return -ENOMEM;
745
746 while (len) {
747 transfer = min_t(int, len, 4096);
748 memcpy(buf, data, transfer);
749
750 err = usb_control_msg(hif_dev->udev,
751 usb_sndctrlpipe(hif_dev->udev, 0),
752 FIRMWARE_DOWNLOAD, 0x40 | USB_DIR_OUT,
753 addr >> 8, 0, buf, transfer, HZ);
754 if (err < 0) {
755 kfree(buf);
756 return err;
757 }
758
759 len -= transfer;
760 data += transfer;
761 addr += transfer;
762 }
763 kfree(buf);
764
765 /*
766 * Issue FW download complete command to firmware.
767 */
768 err = usb_control_msg(hif_dev->udev, usb_sndctrlpipe(hif_dev->udev, 0),
769 FIRMWARE_DOWNLOAD_COMP,
770 0x40 | USB_DIR_OUT,
771 AR9271_FIRMWARE_TEXT >> 8, 0, NULL, 0, HZ);
772 if (err)
773 return -EIO;
774
775	dev_info(&hif_dev->udev->dev, "ath9k_htc: Transferred FW: %s, size: %lu\n",
776 "ar9271.fw", (unsigned long) hif_dev->firmware->size);
777
778 return 0;
779}
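
The download loop above pushes the image to the target in 4 KiB chunks over vendor control requests, with the destination address carried (shifted right by 8) in wValue, and then a FIRMWARE_DOWNLOAD_COMP request tells the chip the transfer is complete. A standalone model of the address progression (the image size is made up):

	#include <stdio.h>

	int main(void)
	{
		unsigned int addr = 0x501000;	/* AR9271_FIRMWARE */
		long len = 51200;		/* illustrative image size */

		while (len > 0) {
			int transfer = len < 4096 ? (int)len : 4096;
			printf("chunk at 0x%06x (wValue 0x%04x), %d bytes\n",
			       addr, addr >> 8, transfer);
			len -= transfer;
			addr += transfer;
		}
		return 0;
	}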
780
781static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev,
782 const char *fw_name)
783{
784 int ret;
785
786 /* Request firmware */
787 ret = request_firmware(&hif_dev->firmware, fw_name, &hif_dev->udev->dev);
788 if (ret) {
789 dev_err(&hif_dev->udev->dev,
790 "ath9k_htc: Firmware - %s not found\n", fw_name);
791 goto err_fw_req;
792 }
793
794 /* Download firmware */
795 ret = ath9k_hif_usb_download_fw(hif_dev);
796 if (ret) {
797 dev_err(&hif_dev->udev->dev,
798 "ath9k_htc: Firmware - %s download failed\n", fw_name);
799 goto err_fw_download;
800 }
801
802 /* Alloc URBs */
803 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
804 if (ret) {
805 dev_err(&hif_dev->udev->dev,
806 "ath9k_htc: Unable to allocate URBs\n");
807 goto err_urb;
808 }
809
810 return 0;
811
812err_urb:
813 /* Nothing */
814err_fw_download:
815 release_firmware(hif_dev->firmware);
816err_fw_req:
817 hif_dev->firmware = NULL;
818 return ret;
819}
820
821static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
822{
823 usb_kill_anchored_urbs(&hif_dev->regout_submitted);
824 ath9k_hif_usb_dealloc_reg_in_urb(hif_dev);
825 ath9k_hif_usb_dealloc_tx_urbs(hif_dev);
826 ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
827}
828
829static void ath9k_hif_usb_dev_deinit(struct hif_device_usb *hif_dev)
830{
831 ath9k_hif_usb_dealloc_urbs(hif_dev);
832 if (hif_dev->firmware)
833 release_firmware(hif_dev->firmware);
834}
835
836static int ath9k_hif_usb_probe(struct usb_interface *interface,
837 const struct usb_device_id *id)
838{
839 struct usb_device *udev = interface_to_usbdev(interface);
840 struct hif_device_usb *hif_dev;
841 const char *fw_name = (const char *) id->driver_info;
842 int ret = 0;
843
844 hif_dev = kzalloc(sizeof(struct hif_device_usb), GFP_KERNEL);
845 if (!hif_dev) {
846 ret = -ENOMEM;
847 goto err_alloc;
848 }
849
850 usb_get_dev(udev);
851 hif_dev->udev = udev;
852 hif_dev->interface = interface;
853 hif_dev->device_id = id->idProduct;
854#ifdef CONFIG_PM
855 udev->reset_resume = 1;
856#endif
857 usb_set_intfdata(interface, hif_dev);
858
859 ret = ath9k_hif_usb_dev_init(hif_dev, fw_name);
860 if (ret) {
861 ret = -EINVAL;
862 goto err_hif_init_usb;
863 }
864
865 hif_dev->htc_handle = ath9k_htc_hw_alloc(hif_dev);
866 if (hif_dev->htc_handle == NULL) {
867 ret = -ENOMEM;
868 goto err_htc_hw_alloc;
869 }
870
871 ret = ath9k_htc_hw_init(&hif_usb, hif_dev->htc_handle, hif_dev,
872 &hif_dev->udev->dev, hif_dev->device_id,
873 ATH9K_HIF_USB);
874 if (ret) {
875 ret = -EINVAL;
876 goto err_htc_hw_init;
877 }
878
879 dev_info(&hif_dev->udev->dev, "ath9k_htc: USB layer initialized\n");
880
881 return 0;
882
883err_htc_hw_init:
884 ath9k_htc_hw_free(hif_dev->htc_handle);
885err_htc_hw_alloc:
886 ath9k_hif_usb_dev_deinit(hif_dev);
887err_hif_init_usb:
888 usb_set_intfdata(interface, NULL);
889 kfree(hif_dev);
890 usb_put_dev(udev);
891err_alloc:
892 return ret;
893}
894
895static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
896{
897 struct usb_device *udev = interface_to_usbdev(interface);
898 struct hif_device_usb *hif_dev =
899 (struct hif_device_usb *) usb_get_intfdata(interface);
900
901 if (hif_dev) {
902 ath9k_htc_hw_deinit(hif_dev->htc_handle, true);
903 ath9k_htc_hw_free(hif_dev->htc_handle);
904 ath9k_hif_usb_dev_deinit(hif_dev);
905 usb_set_intfdata(interface, NULL);
906 }
907
908	if (hif_dev && (hif_dev->flags & HIF_USB_START))
909 usb_reset_device(udev);
910
911 kfree(hif_dev);
912 dev_info(&udev->dev, "ath9k_htc: USB layer deinitialized\n");
913 usb_put_dev(udev);
914}
915
916#ifdef CONFIG_PM
917static int ath9k_hif_usb_suspend(struct usb_interface *interface,
918 pm_message_t message)
919{
920 struct hif_device_usb *hif_dev =
921 (struct hif_device_usb *) usb_get_intfdata(interface);
922
923 ath9k_hif_usb_dealloc_urbs(hif_dev);
924
925 return 0;
926}
927
928static int ath9k_hif_usb_resume(struct usb_interface *interface)
929{
930 struct hif_device_usb *hif_dev =
931 (struct hif_device_usb *) usb_get_intfdata(interface);
932 int ret;
933
934 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
935 if (ret)
936 return ret;
937
938 if (hif_dev->firmware) {
939 ret = ath9k_hif_usb_download_fw(hif_dev);
940 if (ret)
941 goto fail_resume;
942 } else {
943 ath9k_hif_usb_dealloc_urbs(hif_dev);
944 return -EIO;
945 }
946
947 mdelay(100);
948
949 ret = ath9k_htc_resume(hif_dev->htc_handle);
950
951 if (ret)
952 goto fail_resume;
953
954 return 0;
955
956fail_resume:
957 ath9k_hif_usb_dealloc_urbs(hif_dev);
958
959 return ret;
960}
961#endif
962
963static struct usb_driver ath9k_hif_usb_driver = {
964 .name = "ath9k_hif_usb",
965 .probe = ath9k_hif_usb_probe,
966 .disconnect = ath9k_hif_usb_disconnect,
967#ifdef CONFIG_PM
968 .suspend = ath9k_hif_usb_suspend,
969 .resume = ath9k_hif_usb_resume,
970 .reset_resume = ath9k_hif_usb_resume,
971#endif
972 .id_table = ath9k_hif_usb_ids,
973 .soft_unbind = 1,
974};
975
976int ath9k_hif_usb_init(void)
977{
978 return usb_register(&ath9k_hif_usb_driver);
979}
980
981void ath9k_hif_usb_exit(void)
982{
983 usb_deregister(&ath9k_hif_usb_driver);
984}
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
new file mode 100644
index 000000000000..7d49a8af420e
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -0,0 +1,105 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_USB_H
18#define HTC_USB_H
19
20#define AR9271_FIRMWARE 0x501000
21#define AR9271_FIRMWARE_TEXT 0x903000
22
23#define FIRMWARE_DOWNLOAD 0x30
24#define FIRMWARE_DOWNLOAD_COMP 0x31
25
26#define ATH_USB_RX_STREAM_MODE_TAG 0x4e00
27#define ATH_USB_TX_STREAM_MODE_TAG 0x697e
28
29/* FIXME: Verify these numbers (with Windows) */
30#define MAX_TX_URB_NUM 8
31#define MAX_TX_BUF_NUM 1024
32#define MAX_TX_BUF_SIZE 32768
33#define MAX_TX_AGGR_NUM 20
34
35#define MAX_RX_URB_NUM 8
36#define MAX_RX_BUF_SIZE 16384
37#define MAX_PKT_NUM_IN_TRANSFER 10
38
39#define MAX_REG_OUT_URB_NUM 1
40#define MAX_REG_OUT_BUF_NUM 8
41
42#define MAX_REG_IN_BUF_SIZE 64
43
44/* USB Endpoint definition */
45#define USB_WLAN_TX_PIPE 1
46#define USB_WLAN_RX_PIPE 2
47#define USB_REG_IN_PIPE 3
48#define USB_REG_OUT_PIPE 4
49
50#define HIF_USB_MAX_RXPIPES 2
51#define HIF_USB_MAX_TXPIPES 4
52
53struct tx_buf {
54 u8 *buf;
55 u16 len;
56 u16 offset;
57 struct urb *urb;
58 struct sk_buff_head skb_queue;
59 struct hif_device_usb *hif_dev;
60 struct list_head list;
61};
62
63#define HIF_USB_TX_STOP BIT(0)
64#define HIF_USB_TX_FLUSH BIT(1)
65
66struct hif_usb_tx {
67 u8 flags;
68 u8 tx_buf_cnt;
69 u16 tx_skb_cnt;
70 struct sk_buff_head tx_skb_queue;
71 struct list_head tx_buf;
72 struct list_head tx_pending;
73 spinlock_t tx_lock;
74};
75
76struct cmd_buf {
77 struct sk_buff *skb;
78 struct hif_device_usb *hif_dev;
79};
80
81#define HIF_USB_START BIT(0)
82
83struct hif_device_usb {
84 u16 device_id;
85 struct usb_device *udev;
86 struct usb_interface *interface;
87 const struct firmware *firmware;
88 struct htc_target *htc_handle;
89 struct hif_usb_tx tx;
90 struct urb *reg_in_urb;
91 struct usb_anchor regout_submitted;
92 struct usb_anchor rx_submitted;
93 struct sk_buff *remain_skb;
94 int rx_remain_len;
95 int rx_pkt_len;
96 int rx_transfer_len;
97 int rx_pad_len;
98 spinlock_t rx_lock;
99 u8 flags; /* HIF_USB_* */
100};
101
102int ath9k_hif_usb_init(void);
103void ath9k_hif_usb_exit(void);
104
105#endif /* HTC_USB_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
new file mode 100644
index 000000000000..78213fc71b09
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -0,0 +1,462 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_H
18#define HTC_H
19
20#include <linux/module.h>
21#include <linux/usb.h>
22#include <linux/firmware.h>
23#include <linux/skbuff.h>
24#include <linux/netdevice.h>
25#include <linux/leds.h>
26#include <net/mac80211.h>
27
28#include "common.h"
29#include "htc_hst.h"
30#include "hif_usb.h"
31#include "wmi.h"
32
33#define ATH_STA_SHORT_CALINTERVAL 1000 /* 1 second */
34#define ATH_ANI_POLLINTERVAL 100 /* 100 ms */
35#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
36#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
37
38#define ATH_DEFAULT_BMISS_LIMIT 10
39#define IEEE80211_MS_TO_TU(x) (((x) * 1000) / 1024)
40#define TSF_TO_TU(_h, _l) \
41 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
42
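
TSF_TO_TU converts the 64-bit microsecond TSF into time units of 1024 us each, i.e. a right shift by 10 applied across the split high/low words. A standalone check with an arbitrary timestamp:

	#include <stdio.h>
	#include <stdint.h>

	#define TSF_TO_TU(_h, _l) \
		((((uint32_t)(_h)) << 22) | (((uint32_t)(_l)) >> 10))

	int main(void)
	{
		uint64_t tsf = 123456789012ULL;	/* arbitrary TSF in us */
		uint32_t tu = TSF_TO_TU(tsf >> 32, (uint32_t)tsf);

		printf("%u == %u\n", tu, (uint32_t)(tsf >> 10));	/* same value */
		return 0;
	}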
43extern struct ieee80211_ops ath9k_htc_ops;
44extern int htc_modparam_nohwcrypt;
45
46enum htc_phymode {
47 HTC_MODE_AUTO = 0,
48 HTC_MODE_11A = 1,
49 HTC_MODE_11B = 2,
50 HTC_MODE_11G = 3,
51 HTC_MODE_FH = 4,
52 HTC_MODE_TURBO_A = 5,
53 HTC_MODE_TURBO_G = 6,
54 HTC_MODE_11NA = 7,
55 HTC_MODE_11NG = 8
56};
57
58enum htc_opmode {
59 HTC_M_STA = 1,
60 HTC_M_IBSS = 0,
61 HTC_M_AHDEMO = 3,
62 HTC_M_HOSTAP = 6,
63 HTC_M_MONITOR = 8,
64 HTC_M_WDS = 2
65};
66
67#define ATH9K_HTC_HDRSPACE sizeof(struct htc_frame_hdr)
68#define ATH9K_HTC_AMPDU 1
69#define ATH9K_HTC_NORMAL 2
70
71#define ATH9K_HTC_TX_CTSONLY 0x1
72#define ATH9K_HTC_TX_RTSCTS 0x2
73#define ATH9K_HTC_TX_USE_MIN_RATE 0x100
74
75struct tx_frame_hdr {
76 u8 data_type;
77 u8 node_idx;
78 u8 vif_idx;
79 u8 tidno;
80 u32 flags; /* ATH9K_HTC_TX_* */
81 u8 key_type;
82 u8 keyix;
83 u8 reserved[26];
84} __packed;
85
86struct tx_mgmt_hdr {
87 u8 node_idx;
88 u8 vif_idx;
89 u8 tidno;
90 u8 flags;
91 u8 key_type;
92 u8 keyix;
93 u16 reserved;
94} __packed;
95
96struct tx_beacon_header {
97 u8 len_changed;
98 u8 vif_index;
99 u16 rev;
100} __packed;
101
102struct ath9k_htc_target_hw {
103 u32 flags;
104 u32 flags_ext;
105 u32 ampdu_limit;
106 u8 ampdu_subframes;
107 u8 tx_chainmask;
108 u8 tx_chainmask_legacy;
109 u8 rtscts_ratecode;
110 u8 protmode;
111} __packed;
112
113struct ath9k_htc_cap_target {
114 u32 flags;
115 u32 flags_ext;
116 u32 ampdu_limit;
117 u8 ampdu_subframes;
118 u8 tx_chainmask;
119 u8 tx_chainmask_legacy;
120 u8 rtscts_ratecode;
121 u8 protmode;
122} __packed;
123
124struct ath9k_htc_target_vif {
125 u8 index;
126 u8 des_bssid[ETH_ALEN];
127 enum htc_opmode opmode;
128 u8 myaddr[ETH_ALEN];
129 u8 bssid[ETH_ALEN];
130 u32 flags;
131 u32 flags_ext;
132 u16 ps_sta;
133 u16 rtsthreshold;
134 u8 ath_cap;
135 u8 node;
136 s8 mcast_rate;
137} __packed;
138
139#define ATH_HTC_STA_AUTH 0x0001
140#define ATH_HTC_STA_QOS 0x0002
141#define ATH_HTC_STA_ERP 0x0004
142#define ATH_HTC_STA_HT 0x0008
143
144/* FIXME: UAPSD variables */
145struct ath9k_htc_target_sta {
146 u16 associd;
147 u16 txpower;
148 u32 ucastkey;
149 u8 macaddr[ETH_ALEN];
150 u8 bssid[ETH_ALEN];
151 u8 sta_index;
152 u8 vif_index;
153 u8 vif_sta;
154 u16 flags; /* ATH_HTC_STA_* */
155 u16 htcap;
156 u8 valid;
157 u16 capinfo;
158 struct ath9k_htc_target_hw *hw;
159 struct ath9k_htc_target_vif *vif;
160 u16 txseqmgmt;
161 u8 is_vif_sta;
162 u16 maxampdu;
163 u16 iv16;
164 u32 iv32;
165} __packed;
166
167struct ath9k_htc_target_aggr {
168 u8 sta_index;
169 u8 tidno;
170 u8 aggr_enable;
171 u8 padding;
172} __packed;
173
174#define ATH_HTC_RATE_MAX 30
175
176#define WLAN_RC_DS_FLAG 0x01
177#define WLAN_RC_40_FLAG 0x02
178#define WLAN_RC_SGI_FLAG 0x04
179#define WLAN_RC_HT_FLAG 0x08
180
181struct ath9k_htc_rateset {
182 u8 rs_nrates;
183 u8 rs_rates[ATH_HTC_RATE_MAX];
184};
185
186struct ath9k_htc_rate {
187 struct ath9k_htc_rateset legacy_rates;
188 struct ath9k_htc_rateset ht_rates;
189} __packed;
190
191struct ath9k_htc_target_rate {
192 u8 sta_index;
193 u8 isnew;
194 u32 capflags;
195 struct ath9k_htc_rate rates;
196};
197
198struct ath9k_htc_target_stats {
199 u32 tx_shortretry;
200 u32 tx_longretry;
201 u32 tx_xretries;
202 u32 ht_txunaggr_xretry;
203 u32 ht_tx_xretries;
204} __packed;
205
206struct ath9k_htc_vif {
207 u8 index;
208};
209
210#define ATH9K_HTC_MAX_STA 8
211#define ATH9K_HTC_MAX_TID 8
212
213enum tid_aggr_state {
214 AGGR_STOP = 0,
215 AGGR_PROGRESS,
216 AGGR_START,
217 AGGR_OPERATIONAL
218};
219
220struct ath9k_htc_sta {
221 u8 index;
222 enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
223};
224
225struct ath9k_htc_aggr_work {
226 u16 tid;
227 u8 sta_addr[ETH_ALEN];
228 struct ieee80211_hw *hw;
229 struct ieee80211_vif *vif;
230 enum ieee80211_ampdu_mlme_action action;
231 struct mutex mutex;
232};
233
234#define ATH9K_HTC_RXBUF 256
235#define HTC_RX_FRAME_HEADER_SIZE 40
236
237struct ath9k_htc_rxbuf {
238 bool in_process;
239 struct sk_buff *skb;
240 struct ath_htc_rx_status rxstatus;
241 struct list_head list;
242};
243
244struct ath9k_htc_rx {
245 int last_rssi; /* FIXME: per-STA */
246 struct list_head rxbuf;
247 spinlock_t rxbuflock;
248};
249
250struct ath9k_htc_tx_ctl {
251 u8 type; /* ATH9K_HTC_* */
252};
253
254#ifdef CONFIG_ATH9K_HTC_DEBUGFS
255
256#define TX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.tx_stats.c++)
257#define RX_STAT_INC(c) (hif_dev->htc_handle->drv_priv->debug.rx_stats.c++)
258
259struct ath_tx_stats {
260 u32 buf_queued;
261 u32 buf_completed;
262 u32 skb_queued;
263 u32 skb_completed;
264};
265
266struct ath_rx_stats {
267 u32 skb_allocated;
268 u32 skb_completed;
269 u32 skb_dropped;
270};
271
272struct ath9k_debug {
273 struct dentry *debugfs_phy;
274 struct dentry *debugfs_tgt_stats;
275 struct dentry *debugfs_xmit;
276 struct dentry *debugfs_recv;
277 struct ath_tx_stats tx_stats;
278 struct ath_rx_stats rx_stats;
279 u32 txrate;
280};
281
282#else
283
284#define TX_STAT_INC(c) do { } while (0)
285#define RX_STAT_INC(c) do { } while (0)
286
287#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
288
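
The TX_STAT_INC/RX_STAT_INC macros splice a member name into the counter expression, so the same call sites compile to real increments with debugfs enabled and to nothing without it. The member-splicing half of the idiom, modelled standalone (the struct here is a stand-in, not the driver's):

	#include <stdio.h>

	struct tx_stats { unsigned int skb_queued, skb_completed; };
	static struct tx_stats stats;

	/* 'c' is a member name pasted into the expression, as above */
	#define TX_STAT_INC(c) (stats.c++)

	int main(void)
	{
		TX_STAT_INC(skb_queued);
		TX_STAT_INC(skb_completed);
		printf("%u %u\n", stats.skb_queued, stats.skb_completed);
		return 0;
	}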
289#define ATH_LED_PIN_DEF 1
290#define ATH_LED_PIN_9287 8
291#define ATH_LED_PIN_9271 15
292#define ATH_LED_ON_DURATION_IDLE 350 /* in msecs */
293#define ATH_LED_OFF_DURATION_IDLE 250 /* in msecs */
294
295enum ath_led_type {
296 ATH_LED_RADIO,
297 ATH_LED_ASSOC,
298 ATH_LED_TX,
299 ATH_LED_RX
300};
301
302struct ath_led {
303 struct ath9k_htc_priv *priv;
304 struct led_classdev led_cdev;
305 enum ath_led_type led_type;
306 struct delayed_work brightness_work;
307 char name[32];
308 bool registered;
309 int brightness;
310};
311
312struct htc_beacon_config {
313 u16 beacon_interval;
314 u16 listen_interval;
315 u16 dtim_period;
316 u16 bmiss_timeout;
317 u8 dtim_count;
318};
319
320#define OP_INVALID BIT(0)
321#define OP_SCANNING BIT(1)
322#define OP_FULL_RESET BIT(2)
323#define OP_LED_ASSOCIATED BIT(3)
324#define OP_LED_ON BIT(4)
325#define OP_PREAMBLE_SHORT BIT(5)
326#define OP_PROTECT_ENABLE BIT(6)
327#define OP_TXAGGR BIT(7)
328#define OP_ASSOCIATED BIT(8)
329#define OP_ENABLE_BEACON BIT(9)
330#define OP_LED_DEINIT BIT(10)
331
332struct ath9k_htc_priv {
333 struct device *dev;
334 struct ieee80211_hw *hw;
335 struct ath_hw *ah;
336 struct htc_target *htc;
337 struct wmi *wmi;
338
339 enum htc_endpoint_id wmi_cmd_ep;
340 enum htc_endpoint_id beacon_ep;
341 enum htc_endpoint_id cab_ep;
342 enum htc_endpoint_id uapsd_ep;
343 enum htc_endpoint_id mgmt_ep;
344 enum htc_endpoint_id data_be_ep;
345 enum htc_endpoint_id data_bk_ep;
346 enum htc_endpoint_id data_vi_ep;
347 enum htc_endpoint_id data_vo_ep;
348
349 u16 op_flags;
350 u16 curtxpow;
351 u16 txpowlimit;
352 u16 nvifs;
353 u16 nstations;
354 u16 seq_no;
355 u32 bmiss_cnt;
356
357 struct sk_buff *beacon;
358 spinlock_t beacon_lock;
359
360 bool tx_queues_stop;
361 spinlock_t tx_lock;
362
363 struct ieee80211_vif *vif;
364 struct htc_beacon_config cur_beacon_conf;
365 unsigned int rxfilter;
366 struct tasklet_struct wmi_tasklet;
367 struct tasklet_struct rx_tasklet;
368 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
369 struct ath9k_htc_rx rx;
370 struct tasklet_struct tx_tasklet;
371 struct sk_buff_head tx_queue;
372 struct ath9k_htc_aggr_work aggr_work;
373 struct delayed_work ath9k_aggr_work;
374 struct delayed_work ath9k_ani_work;
375 struct work_struct ps_work;
376
377 struct mutex htc_pm_lock;
378 unsigned long ps_usecount;
379 bool ps_enabled;
380
381 struct ath_led radio_led;
382 struct ath_led assoc_led;
383 struct ath_led tx_led;
384 struct ath_led rx_led;
385 struct delayed_work ath9k_led_blink_work;
386 int led_on_duration;
387 int led_off_duration;
388 int led_on_cnt;
389 int led_off_cnt;
390 int hwq_map[ATH9K_WME_AC_VO+1];
391
392#ifdef CONFIG_ATH9K_HTC_DEBUGFS
393 struct ath9k_debug debug;
394#endif
395 struct ath9k_htc_target_rate tgt_rate;
396
397 struct mutex mutex;
398};
399
400static inline void ath_read_cachesize(struct ath_common *common, int *csz)
401{
402 common->bus_ops->read_cachesize(common, csz);
403}
404
405void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
406 struct ieee80211_vif *vif);
407void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending);
408void ath9k_htc_beacon_update(struct ath9k_htc_priv *priv,
409 struct ieee80211_vif *vif);
410
411void ath9k_htc_rxep(void *priv, struct sk_buff *skb,
412 enum htc_endpoint_id ep_id);
413void ath9k_htc_txep(void *priv, struct sk_buff *skb, enum htc_endpoint_id ep_id,
414 bool txok);
415
416void ath9k_htc_station_work(struct work_struct *work);
417void ath9k_htc_aggr_work(struct work_struct *work);
418void ath9k_ani_work(struct work_struct *work);
419
420int ath9k_tx_init(struct ath9k_htc_priv *priv);
421void ath9k_tx_tasklet(unsigned long data);
422int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb);
423void ath9k_tx_cleanup(struct ath9k_htc_priv *priv);
424bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
425 enum ath9k_tx_queue_subtype qtype);
426int get_hw_qnum(u16 queue, int *hwq_map);
427int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
428 struct ath9k_tx_queue_info *qinfo);
429
430int ath9k_rx_init(struct ath9k_htc_priv *priv);
431void ath9k_rx_cleanup(struct ath9k_htc_priv *priv);
432void ath9k_host_rx_init(struct ath9k_htc_priv *priv);
433void ath9k_rx_tasklet(unsigned long data);
434u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv);
435
436void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv);
437void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
438void ath9k_ps_work(struct work_struct *work);
439
440void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
441void ath9k_init_leds(struct ath9k_htc_priv *priv);
442void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
443
444int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
445 u16 devid);
446void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
447#ifdef CONFIG_PM
448int ath9k_htc_resume(struct htc_target *htc_handle);
449#endif
450#ifdef CONFIG_ATH9K_HTC_DEBUGFS
451int ath9k_htc_debug_create_root(void);
452void ath9k_htc_debug_remove_root(void);
453int ath9k_htc_init_debug(struct ath_hw *ah);
454void ath9k_htc_exit_debug(struct ath_hw *ah);
455#else
456static inline int ath9k_htc_debug_create_root(void) { return 0; }
457static inline void ath9k_htc_debug_remove_root(void) { }
458static inline int ath9k_htc_init_debug(struct ath_hw *ah) { return 0; }
459static inline void ath9k_htc_exit_debug(struct ath_hw *ah) { }
460#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
461
462#endif /* HTC_H */
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
new file mode 100644
index 000000000000..5e21f4d92ff5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -0,0 +1,277 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19#define FUDGE 2
20
21static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
22 struct htc_beacon_config *bss_conf)
23{
24 struct ath_common *common = ath9k_hw_common(priv->ah);
25 struct ath9k_beacon_state bs;
26 enum ath9k_int imask = 0;
27 int dtimperiod, dtimcount, sleepduration;
28 int cfpperiod, cfpcount, bmiss_timeout;
29 u32 nexttbtt = 0, intval, tsftu, htc_imask = 0;
30 u64 tsf;
31 int num_beacons, offset, dtim_dec_count, cfp_dec_count;
32 int ret;
33 u8 cmd_rsp;
34
35 memset(&bs, 0, sizeof(bs));
36
37 intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
38 bmiss_timeout = (ATH_DEFAULT_BMISS_LIMIT * bss_conf->beacon_interval);
39
40 /*
41	 * Set up DTIM and CFP parameters according to the
42	 * last beacon we received (which may be none).
43 */
44 dtimperiod = bss_conf->dtim_period;
45 if (dtimperiod <= 0) /* NB: 0 if not known */
46 dtimperiod = 1;
47 dtimcount = 1;
48 if (dtimcount >= dtimperiod) /* NB: sanity check */
49 dtimcount = 0;
50 cfpperiod = 1; /* NB: no PCF support yet */
51 cfpcount = 0;
52
53 sleepduration = intval;
54 if (sleepduration <= 0)
55 sleepduration = intval;
56
57 /*
58 * Pull nexttbtt forward to reflect the current
59 * TSF and calculate dtim+cfp state for the result.
60 */
61 tsf = ath9k_hw_gettsf64(priv->ah);
62 tsftu = TSF_TO_TU(tsf>>32, tsf) + FUDGE;
63
64 num_beacons = tsftu / intval + 1;
65 offset = tsftu % intval;
66 nexttbtt = tsftu - offset;
67 if (offset)
68 nexttbtt += intval;
69
70 /* DTIM Beacon every dtimperiod Beacon */
71 dtim_dec_count = num_beacons % dtimperiod;
72 /* CFP every cfpperiod DTIM Beacon */
73 cfp_dec_count = (num_beacons / dtimperiod) % cfpperiod;
74 if (dtim_dec_count)
75 cfp_dec_count++;
76
77 dtimcount -= dtim_dec_count;
78 if (dtimcount < 0)
79 dtimcount += dtimperiod;
80
81 cfpcount -= cfp_dec_count;
82 if (cfpcount < 0)
83 cfpcount += cfpperiod;
84
85 bs.bs_intval = intval;
86 bs.bs_nexttbtt = nexttbtt;
87 bs.bs_dtimperiod = dtimperiod*intval;
88 bs.bs_nextdtim = bs.bs_nexttbtt + dtimcount*intval;
89 bs.bs_cfpperiod = cfpperiod*bs.bs_dtimperiod;
90 bs.bs_cfpnext = bs.bs_nextdtim + cfpcount*bs.bs_dtimperiod;
91 bs.bs_cfpmaxduration = 0;
92
93 /*
94 * Calculate the number of consecutive beacons to miss* before taking
95 * a BMISS interrupt. The configuration is specified in TU so we only
96 * need calculate based on the beacon interval. Note that we clamp the
97 * result to at most 15 beacons.
98 */
99 if (sleepduration > intval) {
100 bs.bs_bmissthreshold = ATH_DEFAULT_BMISS_LIMIT / 2;
101 } else {
102 bs.bs_bmissthreshold = DIV_ROUND_UP(bmiss_timeout, intval);
103 if (bs.bs_bmissthreshold > 15)
104 bs.bs_bmissthreshold = 15;
105 else if (bs.bs_bmissthreshold <= 0)
106 bs.bs_bmissthreshold = 1;
107 }
108
109 /*
110 * Calculate sleep duration. The configuration is given in ms.
111 * We ensure a multiple of the beacon period is used. Also, if the sleep
112	 * duration is greater than the DTIM period then it makes sense
113 * to make it a multiple of that.
114 *
115 * XXX fixed at 100ms
116 */
117
118 bs.bs_sleepduration = roundup(IEEE80211_MS_TO_TU(100), sleepduration);
119 if (bs.bs_sleepduration > bs.bs_dtimperiod)
120 bs.bs_sleepduration = bs.bs_dtimperiod;
121
122 /* TSF out of range threshold fixed at 1 second */
123 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
124
125 ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
126 ath_print(common, ATH_DBG_BEACON,
127 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
128 bs.bs_bmissthreshold, bs.bs_sleepduration,
129 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
130
131 /* Set the computed STA beacon timers */
132
133 WMI_CMD(WMI_DISABLE_INTR_CMDID);
134 ath9k_hw_set_sta_beacon_timers(priv->ah, &bs);
135 imask |= ATH9K_INT_BMISS;
136 htc_imask = cpu_to_be32(imask);
137 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
138}
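
The nexttbtt derivation in ath9k_htc_beacon_config_sta() above rounds the fudged TSF (already converted to TU) up to the next beacon-interval boundary. A standalone model with illustrative numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tsftu = 123457;	/* current TSF in TU, plus FUDGE */
		unsigned int intval = 100;	/* beacon interval in TU */

		unsigned int num_beacons = tsftu / intval + 1;
		unsigned int offset = tsftu % intval;
		unsigned int nexttbtt = tsftu - offset;
		if (offset)
			nexttbtt += intval;

		printf("beacons elapsed: %u, next TBTT at %u TU\n",
		       num_beacons, nexttbtt);
		return 0;
	}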
139
140static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
141 struct htc_beacon_config *bss_conf)
142{
143 struct ath_common *common = ath9k_hw_common(priv->ah);
144 enum ath9k_int imask = 0;
145 u32 nexttbtt, intval, htc_imask = 0;
146 int ret;
147 u8 cmd_rsp;
148
149 intval = bss_conf->beacon_interval & ATH9K_BEACON_PERIOD;
150 nexttbtt = intval;
151 intval |= ATH9K_BEACON_ENA;
152 if (priv->op_flags & OP_ENABLE_BEACON)
153 imask |= ATH9K_INT_SWBA;
154
155 ath_print(common, ATH_DBG_BEACON,
156 "IBSS Beacon config, intval: %d, imask: 0x%x\n",
157 bss_conf->beacon_interval, imask);
158
159 WMI_CMD(WMI_DISABLE_INTR_CMDID);
160 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
161 priv->bmiss_cnt = 0;
162 htc_imask = cpu_to_be32(imask);
163 WMI_CMD_BUF(WMI_ENABLE_INTR_CMDID, &htc_imask);
164}
165
166void ath9k_htc_beacon_update(struct ath9k_htc_priv *priv,
167 struct ieee80211_vif *vif)
168{
169 struct ath_common *common = ath9k_hw_common(priv->ah);
170
171 spin_lock_bh(&priv->beacon_lock);
172
173 if (priv->beacon)
174 dev_kfree_skb_any(priv->beacon);
175
176 priv->beacon = ieee80211_beacon_get(priv->hw, vif);
177 if (!priv->beacon)
178 ath_print(common, ATH_DBG_BEACON,
179 "Unable to allocate beacon\n");
180
181 spin_unlock_bh(&priv->beacon_lock);
182}
183
184void ath9k_htc_swba(struct ath9k_htc_priv *priv, u8 beacon_pending)
185{
186 struct ath9k_htc_vif *avp = (void *)priv->vif->drv_priv;
187 struct tx_beacon_header beacon_hdr;
188 struct ath9k_htc_tx_ctl tx_ctl;
189 struct ieee80211_tx_info *info;
190 u8 *tx_fhdr;
191
192 memset(&beacon_hdr, 0, sizeof(struct tx_beacon_header));
193 memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
194
195 /* FIXME: Handle BMISS */
196 if (beacon_pending != 0) {
197 priv->bmiss_cnt++;
198 return;
199 }
200
201 spin_lock_bh(&priv->beacon_lock);
202
203 if (unlikely(priv->op_flags & OP_SCANNING)) {
204 spin_unlock_bh(&priv->beacon_lock);
205 return;
206 }
207
208 if (unlikely(priv->beacon == NULL)) {
209 spin_unlock_bh(&priv->beacon_lock);
210 return;
211 }
212
213 /* Free the old SKB first */
214 dev_kfree_skb_any(priv->beacon);
215
216 /* Get a new beacon */
217 priv->beacon = ieee80211_beacon_get(priv->hw, priv->vif);
218 if (!priv->beacon) {
219 spin_unlock_bh(&priv->beacon_lock);
220 return;
221 }
222
223 info = IEEE80211_SKB_CB(priv->beacon);
224 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
225 struct ieee80211_hdr *hdr =
226 (struct ieee80211_hdr *) priv->beacon->data;
227 priv->seq_no += 0x10;
228 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
229 hdr->seq_ctrl |= cpu_to_le16(priv->seq_no);
230 }
231
232 tx_ctl.type = ATH9K_HTC_NORMAL;
233 beacon_hdr.vif_index = avp->index;
234 tx_fhdr = skb_push(priv->beacon, sizeof(beacon_hdr));
235 memcpy(tx_fhdr, (u8 *) &beacon_hdr, sizeof(beacon_hdr));
236
237 htc_send(priv->htc, priv->beacon, priv->beacon_ep, &tx_ctl);
238
239 spin_unlock_bh(&priv->beacon_lock);
240}
241
242
243void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
244 struct ieee80211_vif *vif)
245{
246 struct ath_common *common = ath9k_hw_common(priv->ah);
247 enum nl80211_iftype iftype;
248 struct htc_beacon_config *cur_conf = &priv->cur_beacon_conf;
249
250 if (vif) {
251 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
252 iftype = vif->type;
253 cur_conf->beacon_interval = bss_conf->beacon_int;
254 cur_conf->dtim_period = bss_conf->dtim_period;
255 cur_conf->listen_interval = 1;
256 cur_conf->dtim_count = 1;
257 cur_conf->bmiss_timeout =
258 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
259 } else
260 iftype = priv->ah->opmode;
261
262 if (cur_conf->beacon_interval == 0)
263 cur_conf->beacon_interval = 100;
264
265 switch (iftype) {
266 case NL80211_IFTYPE_STATION:
267 ath9k_htc_beacon_config_sta(priv, cur_conf);
268 break;
269 case NL80211_IFTYPE_ADHOC:
270 ath9k_htc_beacon_config_adhoc(priv, cur_conf);
271 break;
272 default:
273 ath_print(common, ATH_DBG_CONFIG,
274 "Unsupported beaconing mode\n");
275 return;
276 }
277}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
new file mode 100644
index 000000000000..aed53573c547
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -0,0 +1,723 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19MODULE_AUTHOR("Atheros Communications");
20MODULE_LICENSE("Dual BSD/GPL");
21MODULE_DESCRIPTION("Atheros driver for 802.11n HTC based wireless devices");
22
23static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
24module_param_named(debug, ath9k_debug, uint, 0);
25MODULE_PARM_DESC(debug, "Debugging mask");
26
27int htc_modparam_nohwcrypt;
28module_param_named(nohwcrypt, htc_modparam_nohwcrypt, int, 0444);
29MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
30
31#define CHAN2G(_freq, _idx) { \
32 .center_freq = (_freq), \
33 .hw_value = (_idx), \
34 .max_power = 20, \
35}
36
37static struct ieee80211_channel ath9k_2ghz_channels[] = {
38 CHAN2G(2412, 0), /* Channel 1 */
39 CHAN2G(2417, 1), /* Channel 2 */
40 CHAN2G(2422, 2), /* Channel 3 */
41 CHAN2G(2427, 3), /* Channel 4 */
42 CHAN2G(2432, 4), /* Channel 5 */
43 CHAN2G(2437, 5), /* Channel 6 */
44 CHAN2G(2442, 6), /* Channel 7 */
45 CHAN2G(2447, 7), /* Channel 8 */
46 CHAN2G(2452, 8), /* Channel 9 */
47 CHAN2G(2457, 9), /* Channel 10 */
48 CHAN2G(2462, 10), /* Channel 11 */
49 CHAN2G(2467, 11), /* Channel 12 */
50 CHAN2G(2472, 12), /* Channel 13 */
51 CHAN2G(2484, 13), /* Channel 14 */
52};
53
54/* Atheros hardware rate code addition for short preamble */
55#define SHPCHECK(__hw_rate, __flags) \
56 ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04) : 0)
57
58#define RATE(_bitrate, _hw_rate, _flags) { \
59 .bitrate = (_bitrate), \
60 .flags = (_flags), \
61 .hw_value = (_hw_rate), \
62 .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
63}
64
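/*
 * SHPCHECK ORs 0x04 into the hardware rate code when a rate supports
 * short preamble, e.g. 2 Mbps: 0x1a becomes 0x1e, matching the values
 * noted alongside the CCK rates below.
 */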
65static struct ieee80211_rate ath9k_legacy_rates[] = {
66 RATE(10, 0x1b, 0),
67	RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1e */
68	RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1d */
69	RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE), /* shortp: 0x1c */
70 RATE(60, 0x0b, 0),
71 RATE(90, 0x0f, 0),
72 RATE(120, 0x0a, 0),
73 RATE(180, 0x0e, 0),
74 RATE(240, 0x09, 0),
75 RATE(360, 0x0d, 0),
76 RATE(480, 0x08, 0),
77 RATE(540, 0x0c, 0),
78};
79
80static int ath9k_htc_wait_for_target(struct ath9k_htc_priv *priv)
81{
82 int time_left;
83
84 /* Firmware can take up to 50ms to get ready, to be safe use 1 second */
85 time_left = wait_for_completion_timeout(&priv->htc->target_wait, HZ);
86 if (!time_left) {
87 dev_err(priv->dev, "ath9k_htc: Target is unresponsive\n");
88 return -ETIMEDOUT;
89 }
90
91 return 0;
92}
93
94static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
95{
96 ath9k_htc_exit_debug(priv->ah);
97 ath9k_hw_deinit(priv->ah);
98 tasklet_kill(&priv->wmi_tasklet);
99 tasklet_kill(&priv->rx_tasklet);
100 tasklet_kill(&priv->tx_tasklet);
101 kfree(priv->ah);
102 priv->ah = NULL;
103}
104
105static void ath9k_deinit_device(struct ath9k_htc_priv *priv)
106{
107 struct ieee80211_hw *hw = priv->hw;
108
109 wiphy_rfkill_stop_polling(hw->wiphy);
110 ath9k_deinit_leds(priv);
111 ieee80211_unregister_hw(hw);
112 ath9k_rx_cleanup(priv);
113 ath9k_tx_cleanup(priv);
114 ath9k_deinit_priv(priv);
115}
116
117static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
118 u16 service_id,
119 void (*tx) (void *,
120 struct sk_buff *,
121 enum htc_endpoint_id,
122 bool txok),
123 enum htc_endpoint_id *ep_id)
124{
125 struct htc_service_connreq req;
126
127 memset(&req, 0, sizeof(struct htc_service_connreq));
128
129 req.service_id = service_id;
130 req.ep_callbacks.priv = priv;
131 req.ep_callbacks.rx = ath9k_htc_rxep;
132 req.ep_callbacks.tx = tx;
133
134 return htc_connect_service(priv->htc, &req, ep_id);
135}
136
137static int ath9k_init_htc_services(struct ath9k_htc_priv *priv)
138{
139 int ret;
140
141	/* WMI CMD */
142 ret = ath9k_wmi_connect(priv->htc, priv->wmi, &priv->wmi_cmd_ep);
143 if (ret)
144 goto err;
145
146 /* Beacon */
147 ret = ath9k_htc_connect_svc(priv, WMI_BEACON_SVC, NULL,
148 &priv->beacon_ep);
149 if (ret)
150 goto err;
151
152 /* CAB */
153 ret = ath9k_htc_connect_svc(priv, WMI_CAB_SVC, ath9k_htc_txep,
154 &priv->cab_ep);
155 if (ret)
156 goto err;
157
158
159 /* UAPSD */
160 ret = ath9k_htc_connect_svc(priv, WMI_UAPSD_SVC, ath9k_htc_txep,
161 &priv->uapsd_ep);
162 if (ret)
163 goto err;
164
165 /* MGMT */
166 ret = ath9k_htc_connect_svc(priv, WMI_MGMT_SVC, ath9k_htc_txep,
167 &priv->mgmt_ep);
168 if (ret)
169 goto err;
170
171 /* DATA BE */
172 ret = ath9k_htc_connect_svc(priv, WMI_DATA_BE_SVC, ath9k_htc_txep,
173 &priv->data_be_ep);
174 if (ret)
175 goto err;
176
177 /* DATA BK */
178 ret = ath9k_htc_connect_svc(priv, WMI_DATA_BK_SVC, ath9k_htc_txep,
179 &priv->data_bk_ep);
180 if (ret)
181 goto err;
182
183 /* DATA VI */
184 ret = ath9k_htc_connect_svc(priv, WMI_DATA_VI_SVC, ath9k_htc_txep,
185 &priv->data_vi_ep);
186 if (ret)
187 goto err;
188
189 /* DATA VO */
190 ret = ath9k_htc_connect_svc(priv, WMI_DATA_VO_SVC, ath9k_htc_txep,
191 &priv->data_vo_ep);
192 if (ret)
193 goto err;
194
195 ret = htc_init(priv->htc);
196 if (ret)
197 goto err;
198
199 return 0;
200
201err:
202 dev_err(priv->dev, "ath9k_htc: Unable to initialize HTC services\n");
203 return ret;
204}
205
206static int ath9k_reg_notifier(struct wiphy *wiphy,
207 struct regulatory_request *request)
208{
209 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
210 struct ath9k_htc_priv *priv = hw->priv;
211
212 return ath_reg_notifier_apply(wiphy, request,
213 ath9k_hw_regulatory(priv->ah));
214}
215
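/*
 * HTC devices have no memory-mapped registers; every register access
 * is tunneled to the target as a WMI command over the USB transport,
 * each bounded by a timeout (100 here, presumably in ms).
 */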
216static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
217{
218 struct ath_hw *ah = (struct ath_hw *) hw_priv;
219 struct ath_common *common = ath9k_hw_common(ah);
220 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
221 __be32 val, reg = cpu_to_be32(reg_offset);
222 int r;
223
224 r = ath9k_wmi_cmd(priv->wmi, WMI_REG_READ_CMDID,
225 (u8 *) &reg, sizeof(reg),
226 (u8 *) &val, sizeof(val),
227 100);
228 if (unlikely(r)) {
229 ath_print(common, ATH_DBG_WMI,
230 "REGISTER READ FAILED: (0x%04x, %d)\n",
231 reg_offset, r);
232 return -EIO;
233 }
234
235 return be32_to_cpu(val);
236}
237
238static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
239{
240 struct ath_hw *ah = (struct ath_hw *) hw_priv;
241 struct ath_common *common = ath9k_hw_common(ah);
242 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
243 __be32 buf[2] = {
244 cpu_to_be32(reg_offset),
245 cpu_to_be32(val),
246 };
247 int r;
248
249 r = ath9k_wmi_cmd(priv->wmi, WMI_REG_WRITE_CMDID,
250 (u8 *) &buf, sizeof(buf),
251 (u8 *) &val, sizeof(val),
252 100);
253 if (unlikely(r)) {
254 ath_print(common, ATH_DBG_WMI,
255			  "REGISTER WRITE FAILED: (0x%04x, %d)\n",
256 reg_offset, r);
257 }
258}
259
260static const struct ath_ops ath9k_common_ops = {
261 .read = ath9k_ioread32,
262 .write = ath9k_iowrite32,
263};
264
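/*
 * The cache line size is reported in 32-bit words; ath9k_init_priv()
 * converts it back to bytes via common->cachelsz = csz << 2.
 */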
265static void ath_usb_read_cachesize(struct ath_common *common, int *csz)
266{
267 *csz = L1_CACHE_BYTES >> 2;
268}
269
270static bool ath_usb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
271{
272 struct ath_hw *ah = (struct ath_hw *) common->ah;
273
274 (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
275
276 if (!ath9k_hw_wait(ah,
277 AR_EEPROM_STATUS_DATA,
278 AR_EEPROM_STATUS_DATA_BUSY |
279 AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
280 AH_WAIT_TIMEOUT))
281 return false;
282
283 *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA),
284 AR_EEPROM_STATUS_DATA_VAL);
285
286 return true;
287}
288
289static const struct ath_bus_ops ath9k_usb_bus_ops = {
290 .ath_bus_type = ATH_USB,
291 .read_cachesize = ath_usb_read_cachesize,
292 .eeprom_read = ath_usb_eeprom_read,
293};
294
295static void setup_ht_cap(struct ath9k_htc_priv *priv,
296 struct ieee80211_sta_ht_cap *ht_info)
297{
298 ht_info->ht_supported = true;
299 ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
300 IEEE80211_HT_CAP_SM_PS |
301 IEEE80211_HT_CAP_SGI_40 |
302 IEEE80211_HT_CAP_DSSSCCK40;
303
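	/* Advertise 64 KB maximum A-MPDU length and 8 usec minimum
	 * MPDU start spacing */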
304 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
305 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
306
307 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
308 ht_info->mcs.rx_mask[0] = 0xff;
309 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
310}
311
312static int ath9k_init_queues(struct ath9k_htc_priv *priv)
313{
314 struct ath_common *common = ath9k_hw_common(priv->ah);
315 int i;
316
317 for (i = 0; i < ARRAY_SIZE(priv->hwq_map); i++)
318 priv->hwq_map[i] = -1;
319
320 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BE)) {
321 ath_print(common, ATH_DBG_FATAL,
322 "Unable to setup xmit queue for BE traffic\n");
323 goto err;
324 }
325
326 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_BK)) {
327 ath_print(common, ATH_DBG_FATAL,
328 "Unable to setup xmit queue for BK traffic\n");
329 goto err;
330 }
331 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VI)) {
332 ath_print(common, ATH_DBG_FATAL,
333 "Unable to setup xmit queue for VI traffic\n");
334 goto err;
335 }
336 if (!ath9k_htc_txq_setup(priv, ATH9K_WME_AC_VO)) {
337 ath_print(common, ATH_DBG_FATAL,
338 "Unable to setup xmit queue for VO traffic\n");
339 goto err;
340 }
341
342 return 0;
343
344err:
345 return -EINVAL;
346}
347
348static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
349{
350 struct ath_common *common = ath9k_hw_common(priv->ah);
351 int i = 0;
352
353 /* Get the hardware key cache size. */
354 common->keymax = priv->ah->caps.keycache_size;
355 if (common->keymax > ATH_KEYMAX) {
356 ath_print(common, ATH_DBG_ANY,
357 "Warning, using only %u entries in %u key cache\n",
358 ATH_KEYMAX, common->keymax);
359 common->keymax = ATH_KEYMAX;
360 }
361
362 /*
363 * Reset the key cache since some parts do not
364 * reset the contents on initial power up.
365 */
366 for (i = 0; i < common->keymax; i++)
367 ath9k_hw_keyreset(priv->ah, (u16) i);
368
369 if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
370 ATH9K_CIPHER_TKIP, NULL)) {
371 /*
372 * Whether we should enable h/w TKIP MIC.
373 * XXX: if we don't support WME TKIP MIC, then we wouldn't
374 * report WMM capable, so it's always safe to turn on
375 * TKIP MIC in this case.
376 */
377 ath9k_hw_setcapability(priv->ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
378 }
379
380 /*
381 * Check whether the separate key cache entries
382 * are required to handle both tx+rx MIC keys.
383 * With split mic keys the number of stations is limited
384 * to 27 otherwise 59.
385 */
386 if (ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
387 ATH9K_CIPHER_TKIP, NULL)
388 && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_CIPHER,
389 ATH9K_CIPHER_MIC, NULL)
390 && ath9k_hw_getcapability(priv->ah, ATH9K_CAP_TKIP_SPLIT,
391 0, NULL))
392 common->splitmic = 1;
393
394 /* turn on mcast key search if possible */
395 if (!ath9k_hw_getcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
396 (void)ath9k_hw_setcapability(priv->ah, ATH9K_CAP_MCAST_KEYSRCH,
397 1, 1, NULL);
398}
399
400static void ath9k_init_channels_rates(struct ath9k_htc_priv *priv)
401{
402 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes)) {
403 priv->sbands[IEEE80211_BAND_2GHZ].channels =
404 ath9k_2ghz_channels;
405 priv->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
406 priv->sbands[IEEE80211_BAND_2GHZ].n_channels =
407 ARRAY_SIZE(ath9k_2ghz_channels);
408 priv->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
409 priv->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
410 ARRAY_SIZE(ath9k_legacy_rates);
411 }
412}
413
414static void ath9k_init_misc(struct ath9k_htc_priv *priv)
415{
416 struct ath_common *common = ath9k_hw_common(priv->ah);
417
418 common->tx_chainmask = priv->ah->caps.tx_chainmask;
419 common->rx_chainmask = priv->ah->caps.rx_chainmask;
420
421 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
422 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
423
424 priv->op_flags |= OP_TXAGGR;
425 priv->ah->opmode = NL80211_IFTYPE_STATION;
426}
427
428static int ath9k_init_priv(struct ath9k_htc_priv *priv, u16 devid)
429{
430 struct ath_hw *ah = NULL;
431 struct ath_common *common;
432 int ret = 0, csz = 0;
433
434 priv->op_flags |= OP_INVALID;
435
436 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
437 if (!ah)
438 return -ENOMEM;
439
440 ah->hw_version.devid = devid;
441 ah->hw_version.subsysid = 0; /* FIXME */
442 priv->ah = ah;
443
444 common = ath9k_hw_common(ah);
445 common->ops = &ath9k_common_ops;
446 common->bus_ops = &ath9k_usb_bus_ops;
447 common->ah = ah;
448 common->hw = priv->hw;
449 common->priv = priv;
450 common->debug_mask = ath9k_debug;
451
452 spin_lock_init(&priv->wmi->wmi_lock);
453 spin_lock_init(&priv->beacon_lock);
454 spin_lock_init(&priv->tx_lock);
455 mutex_init(&priv->mutex);
456 mutex_init(&priv->aggr_work.mutex);
457 mutex_init(&priv->htc_pm_lock);
458 tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
459 (unsigned long)priv);
460 tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
461 (unsigned long)priv);
462 tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
463 INIT_DELAYED_WORK(&priv->ath9k_aggr_work, ath9k_htc_aggr_work);
464 INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
465 INIT_WORK(&priv->ps_work, ath9k_ps_work);
466
467 /*
468 * Cache line size is used to size and align various
469 * structures used to communicate with the hardware.
470 */
471 ath_read_cachesize(common, &csz);
472 common->cachelsz = csz << 2; /* convert to bytes */
473
474 ret = ath9k_hw_init(ah);
475 if (ret) {
476 ath_print(common, ATH_DBG_FATAL,
477 "Unable to initialize hardware; "
478 "initialization status: %d\n", ret);
479 goto err_hw;
480 }
481
482 ret = ath9k_htc_init_debug(ah);
483 if (ret) {
484 ath_print(common, ATH_DBG_FATAL,
485 "Unable to create debugfs files\n");
486 goto err_debug;
487 }
488
489 ret = ath9k_init_queues(priv);
490 if (ret)
491 goto err_queues;
492
493 ath9k_init_crypto(priv);
494 ath9k_init_channels_rates(priv);
495 ath9k_init_misc(priv);
496
497 return 0;
498
499err_queues:
500 ath9k_htc_exit_debug(ah);
501err_debug:
502 ath9k_hw_deinit(ah);
503err_hw:
504
505 kfree(ah);
506 priv->ah = NULL;
507
508 return ret;
509}
510
511static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
512 struct ieee80211_hw *hw)
513{
514 struct ath_common *common = ath9k_hw_common(priv->ah);
515
516 hw->flags = IEEE80211_HW_SIGNAL_DBM |
517 IEEE80211_HW_AMPDU_AGGREGATION |
518 IEEE80211_HW_SPECTRUM_MGMT |
519 IEEE80211_HW_HAS_RATE_CONTROL |
520 IEEE80211_HW_RX_INCLUDES_FCS |
521 IEEE80211_HW_SUPPORTS_PS |
522 IEEE80211_HW_PS_NULLFUNC_STACK;
523
524 hw->wiphy->interface_modes =
525 BIT(NL80211_IFTYPE_STATION) |
526 BIT(NL80211_IFTYPE_ADHOC);
527
528 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
529
530 hw->queues = 4;
531 hw->channel_change_time = 5000;
532 hw->max_listen_interval = 10;
533 hw->vif_data_size = sizeof(struct ath9k_htc_vif);
534 hw->sta_data_size = sizeof(struct ath9k_htc_sta);
535
536 /* tx_frame_hdr is larger than tx_mgmt_hdr anyway */
537 hw->extra_tx_headroom = sizeof(struct tx_frame_hdr) +
538 sizeof(struct htc_frame_hdr) + 4;
539
540 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
541 hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
542 &priv->sbands[IEEE80211_BAND_2GHZ];
543
544 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
545 if (test_bit(ATH9K_MODE_11G, priv->ah->caps.wireless_modes))
546 setup_ht_cap(priv,
547 &priv->sbands[IEEE80211_BAND_2GHZ].ht_cap);
548 }
549
550 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
551}
552
553static int ath9k_init_device(struct ath9k_htc_priv *priv, u16 devid)
554{
555 struct ieee80211_hw *hw = priv->hw;
556 struct ath_common *common;
557 struct ath_hw *ah;
558 int error = 0;
559 struct ath_regulatory *reg;
560
561 /* Bring up device */
562 error = ath9k_init_priv(priv, devid);
563 if (error != 0)
564 goto err_init;
565
566 ah = priv->ah;
567 common = ath9k_hw_common(ah);
568 ath9k_set_hw_capab(priv, hw);
569
570 /* Initialize regulatory */
571 error = ath_regd_init(&common->regulatory, priv->hw->wiphy,
572 ath9k_reg_notifier);
573 if (error)
574 goto err_regd;
575
576 reg = &common->regulatory;
577
578 /* Setup TX */
579 error = ath9k_tx_init(priv);
580 if (error != 0)
581 goto err_tx;
582
583 /* Setup RX */
584 error = ath9k_rx_init(priv);
585 if (error != 0)
586 goto err_rx;
587
588 /* Register with mac80211 */
589 error = ieee80211_register_hw(hw);
590 if (error)
591 goto err_register;
592
593 /* Handle world regulatory */
594 if (!ath_is_world_regd(reg)) {
595 error = regulatory_hint(hw->wiphy, reg->alpha2);
596 if (error)
597 goto err_world;
598 }
599
600 ath9k_init_leds(priv);
601 ath9k_start_rfkill_poll(priv);
602
603 return 0;
604
605err_world:
606 ieee80211_unregister_hw(hw);
607err_register:
608 ath9k_rx_cleanup(priv);
609err_rx:
610 ath9k_tx_cleanup(priv);
611err_tx:
612 /* Nothing */
613err_regd:
614 ath9k_deinit_priv(priv);
615err_init:
616 return error;
617}
618
619int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
620 u16 devid)
621{
622 struct ieee80211_hw *hw;
623 struct ath9k_htc_priv *priv;
624 int ret;
625
626 hw = ieee80211_alloc_hw(sizeof(struct ath9k_htc_priv), &ath9k_htc_ops);
627 if (!hw)
628 return -ENOMEM;
629
630 priv = hw->priv;
631 priv->hw = hw;
632 priv->htc = htc_handle;
633 priv->dev = dev;
634 htc_handle->drv_priv = priv;
635 SET_IEEE80211_DEV(hw, priv->dev);
636
637 ret = ath9k_htc_wait_for_target(priv);
638 if (ret)
639 goto err_free;
640
641 priv->wmi = ath9k_init_wmi(priv);
642 if (!priv->wmi) {
643 ret = -EINVAL;
644 goto err_free;
645 }
646
647 ret = ath9k_init_htc_services(priv);
648 if (ret)
649 goto err_init;
650
651 ret = ath9k_init_device(priv, devid);
652 if (ret)
653 goto err_init;
654
655 return 0;
656
657err_init:
658 ath9k_deinit_wmi(priv);
659err_free:
660 ieee80211_free_hw(hw);
661 return ret;
662}
663
664void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
665{
666 if (htc_handle->drv_priv) {
667 ath9k_deinit_device(htc_handle->drv_priv);
668 ath9k_deinit_wmi(htc_handle->drv_priv);
669 ieee80211_free_hw(htc_handle->drv_priv->hw);
670 }
671}
672
673#ifdef CONFIG_PM
674int ath9k_htc_resume(struct htc_target *htc_handle)
675{
676 int ret;
677
678 ret = ath9k_htc_wait_for_target(htc_handle->drv_priv);
679 if (ret)
680 return ret;
681
682 ret = ath9k_init_htc_services(htc_handle->drv_priv);
683 return ret;
684}
685#endif
686
687static int __init ath9k_htc_init(void)
688{
689 int error;
690
691 error = ath9k_htc_debug_create_root();
692 if (error < 0) {
693 printk(KERN_ERR
694 "ath9k_htc: Unable to create debugfs root: %d\n",
695 error);
696 goto err_dbg;
697 }
698
699 error = ath9k_hif_usb_init();
700 if (error < 0) {
701 printk(KERN_ERR
702 "ath9k_htc: No USB devices found,"
703 " driver not installed.\n");
704 error = -ENODEV;
705 goto err_usb;
706 }
707
708 return 0;
709
710err_usb:
711 ath9k_htc_debug_remove_root();
712err_dbg:
713 return error;
714}
715module_init(ath9k_htc_init);
716
717static void __exit ath9k_htc_exit(void)
718{
719 ath9k_hif_usb_exit();
720 ath9k_htc_debug_remove_root();
721 printk(KERN_INFO "ath9k_htc: Driver unloaded\n");
722}
723module_exit(ath9k_htc_exit);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
new file mode 100644
index 000000000000..eb7722b2cfcc
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -0,0 +1,1733 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19#ifdef CONFIG_ATH9K_HTC_DEBUGFS
20static struct dentry *ath9k_debugfs_root;
21#endif
22
23/*************/
24/* Utilities */
25/*************/
26
27static void ath_update_txpow(struct ath9k_htc_priv *priv)
28{
29 struct ath_hw *ah = priv->ah;
30 u32 txpow;
31
32 if (priv->curtxpow != priv->txpowlimit) {
33 ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
34 /* read back in case value is clamped */
35 ath9k_hw_getcapability(ah, ATH9K_CAP_TXPOW, 1, &txpow);
36 priv->curtxpow = txpow;
37 }
38}
39
40/* HACK Alert: Use 11NG for 2.4 GHz, use 11NA for 5 GHz */
41static enum htc_phymode ath9k_htc_get_curmode(struct ath9k_htc_priv *priv,
42 struct ath9k_channel *ichan)
43{
44 enum htc_phymode mode;
45
46 mode = HTC_MODE_AUTO;
47
48 switch (ichan->chanmode) {
49 case CHANNEL_G:
50 case CHANNEL_G_HT20:
51 case CHANNEL_G_HT40PLUS:
52 case CHANNEL_G_HT40MINUS:
53 mode = HTC_MODE_11NG;
54 break;
55 case CHANNEL_A:
56 case CHANNEL_A_HT20:
57 case CHANNEL_A_HT40PLUS:
58 case CHANNEL_A_HT40MINUS:
59 mode = HTC_MODE_11NA;
60 break;
61 default:
62 break;
63 }
64
65 return mode;
66}
67
68static bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
69 enum ath9k_power_mode mode)
70{
71 bool ret;
72
73 mutex_lock(&priv->htc_pm_lock);
74 ret = ath9k_hw_setpower(priv->ah, mode);
75 mutex_unlock(&priv->htc_pm_lock);
76
77 return ret;
78}
79
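/*
 * Power-save transitions are refcounted: ps_usecount tracks nested
 * wakeup/restore pairs, and the chip drops back to network sleep only
 * when the last caller restores and PS is enabled.
 */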
80void ath9k_htc_ps_wakeup(struct ath9k_htc_priv *priv)
81{
82 mutex_lock(&priv->htc_pm_lock);
83 if (++priv->ps_usecount != 1)
84 goto unlock;
85 ath9k_hw_setpower(priv->ah, ATH9K_PM_AWAKE);
86
87unlock:
88 mutex_unlock(&priv->htc_pm_lock);
89}
90
91void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv)
92{
93 mutex_lock(&priv->htc_pm_lock);
94 if (--priv->ps_usecount != 0)
95 goto unlock;
96
97 if (priv->ps_enabled)
98 ath9k_hw_setpower(priv->ah, ATH9K_PM_NETWORK_SLEEP);
99unlock:
100 mutex_unlock(&priv->htc_pm_lock);
101}
102
103void ath9k_ps_work(struct work_struct *work)
104{
105 struct ath9k_htc_priv *priv =
106 container_of(work, struct ath9k_htc_priv,
107 ps_work);
108 ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
109
110 /* The chip wakes up after receiving the first beacon
111 while network sleep is enabled. For the driver to
112 be in sync with the hw, set the chip to awake and
113 only then set it to sleep.
114 */
115 ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
116}
117
118static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
119 struct ieee80211_hw *hw,
120 struct ath9k_channel *hchan)
121{
122 struct ath_hw *ah = priv->ah;
123 struct ath_common *common = ath9k_hw_common(ah);
124 struct ieee80211_conf *conf = &common->hw->conf;
125 bool fastcc = true;
126 struct ieee80211_channel *channel = hw->conf.channel;
127 enum htc_phymode mode;
128 u16 htc_mode;
129 u8 cmd_rsp;
130 int ret;
131
132 if (priv->op_flags & OP_INVALID)
133 return -EIO;
134
135 if (priv->op_flags & OP_FULL_RESET)
136 fastcc = false;
137
138 /* Fiddle around with fastcc later on, for now just use full reset */
139 fastcc = false;
140 ath9k_htc_ps_wakeup(priv);
141 htc_stop(priv->htc);
142 WMI_CMD(WMI_DISABLE_INTR_CMDID);
143 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
144 WMI_CMD(WMI_STOP_RECV_CMDID);
145
146 ath_print(common, ATH_DBG_CONFIG,
147 "(%u MHz) -> (%u MHz), HT: %d, HT40: %d\n",
148 priv->ah->curchan->channel,
149 channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf));
150
151 ret = ath9k_hw_reset(ah, hchan, fastcc);
152 if (ret) {
153 ath_print(common, ATH_DBG_FATAL,
154			  "Unable to reset channel (%u MHz) "
155 "reset status %d\n", channel->center_freq, ret);
156 ath9k_htc_ps_restore(priv);
157 goto err;
158 }
159
160 ath_update_txpow(priv);
161
162 WMI_CMD(WMI_START_RECV_CMDID);
163 if (ret)
164 goto err;
165
166 ath9k_host_rx_init(priv);
167
168 mode = ath9k_htc_get_curmode(priv, hchan);
169 htc_mode = cpu_to_be16(mode);
170 WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
171 if (ret)
172 goto err;
173
174 WMI_CMD(WMI_ENABLE_INTR_CMDID);
175 if (ret)
176 goto err;
177
178 htc_start(priv->htc);
179
180 priv->op_flags &= ~OP_FULL_RESET;
181err:
182 ath9k_htc_ps_restore(priv);
183 return ret;
184}
185
186static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
187{
188 struct ath_common *common = ath9k_hw_common(priv->ah);
189 struct ath9k_htc_target_vif hvif;
190 int ret = 0;
191 u8 cmd_rsp;
192
193 if (priv->nvifs > 0)
194 return -ENOBUFS;
195
196 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
197 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
198
199 hvif.opmode = cpu_to_be32(HTC_M_MONITOR);
200 priv->ah->opmode = NL80211_IFTYPE_MONITOR;
201 hvif.index = priv->nvifs;
202
203 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
204 if (ret)
205 return ret;
206
207 priv->nvifs++;
208 return 0;
209}
210
211static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
212{
213 struct ath_common *common = ath9k_hw_common(priv->ah);
214 struct ath9k_htc_target_vif hvif;
215 int ret = 0;
216 u8 cmd_rsp;
217
218 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
219 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
220 hvif.index = 0; /* Should do for now */
221 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
222 priv->nvifs--;
223
224 return ret;
225}
226
227static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
228 struct ieee80211_vif *vif,
229 struct ieee80211_sta *sta)
230{
231 struct ath_common *common = ath9k_hw_common(priv->ah);
232 struct ath9k_htc_target_sta tsta;
233 struct ath9k_htc_vif *avp = (struct ath9k_htc_vif *) vif->drv_priv;
234 struct ath9k_htc_sta *ista;
235 int ret;
236 u8 cmd_rsp;
237
238 if (priv->nstations >= ATH9K_HTC_MAX_STA)
239 return -ENOBUFS;
240
241 memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
242
243 if (sta) {
244 ista = (struct ath9k_htc_sta *) sta->drv_priv;
245 memcpy(&tsta.macaddr, sta->addr, ETH_ALEN);
246 memcpy(&tsta.bssid, common->curbssid, ETH_ALEN);
247 tsta.associd = common->curaid;
248 tsta.is_vif_sta = 0;
249 tsta.valid = true;
250 ista->index = priv->nstations;
251 } else {
252 memcpy(&tsta.macaddr, vif->addr, ETH_ALEN);
253 tsta.is_vif_sta = 1;
254 }
255
256 tsta.sta_index = priv->nstations;
257 tsta.vif_index = avp->index;
258 tsta.maxampdu = 0xffff;
259 if (sta && sta->ht_cap.ht_supported)
260 tsta.flags = cpu_to_be16(ATH_HTC_STA_HT);
261
262 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
263 if (ret) {
264 if (sta)
265 ath_print(common, ATH_DBG_FATAL,
266 "Unable to add station entry for: %pM\n", sta->addr);
267 return ret;
268 }
269
270 if (sta)
271 ath_print(common, ATH_DBG_CONFIG,
272 "Added a station entry for: %pM (idx: %d)\n",
273 sta->addr, tsta.sta_index);
274
275 priv->nstations++;
276 return 0;
277}
278
279static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
280 struct ieee80211_vif *vif,
281 struct ieee80211_sta *sta)
282{
283 struct ath_common *common = ath9k_hw_common(priv->ah);
284 struct ath9k_htc_sta *ista;
285 int ret;
286 u8 cmd_rsp, sta_idx;
287
288 if (sta) {
289 ista = (struct ath9k_htc_sta *) sta->drv_priv;
290 sta_idx = ista->index;
291 } else {
292 sta_idx = 0;
293 }
294
295 WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
296 if (ret) {
297 if (sta)
298 ath_print(common, ATH_DBG_FATAL,
299 "Unable to remove station entry for: %pM\n",
300 sta->addr);
301 return ret;
302 }
303
304 if (sta)
305 ath_print(common, ATH_DBG_CONFIG,
306 "Removed a station entry for: %pM (idx: %d)\n",
307 sta->addr, sta_idx);
308
309 priv->nstations--;
310 return 0;
311}
312
313static int ath9k_htc_update_cap_target(struct ath9k_htc_priv *priv)
314{
315 struct ath9k_htc_cap_target tcap;
316 int ret;
317 u8 cmd_rsp;
318
319 memset(&tcap, 0, sizeof(struct ath9k_htc_cap_target));
320
321 /* FIXME: Values are hardcoded */
322 tcap.flags = 0x240c40;
323 tcap.flags_ext = 0x80601000;
324 tcap.ampdu_limit = 0xffff0000;
325 tcap.ampdu_subframes = 20;
326 tcap.tx_chainmask_legacy = 1;
327 tcap.protmode = 1;
328 tcap.tx_chainmask = 1;
329
330 WMI_CMD_BUF(WMI_TARGET_IC_UPDATE_CMDID, &tcap);
331
332 return ret;
333}
334
335static int ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
336 struct ieee80211_vif *vif,
337 struct ieee80211_sta *sta)
338{
339 struct ath_common *common = ath9k_hw_common(priv->ah);
340 struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
341 struct ieee80211_supported_band *sband;
342 struct ath9k_htc_target_rate trate;
343 u32 caps = 0;
344 u8 cmd_rsp;
345 int i, j, ret;
346
347 memset(&trate, 0, sizeof(trate));
348
349 /* Only 2GHz is supported */
350 sband = priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ];
351
352 for (i = 0, j = 0; i < sband->n_bitrates; i++) {
353 if (sta->supp_rates[sband->band] & BIT(i)) {
354 priv->tgt_rate.rates.legacy_rates.rs_rates[j]
355 = (sband->bitrates[i].bitrate * 2) / 10;
356 j++;
357 }
358 }
359 priv->tgt_rate.rates.legacy_rates.rs_nrates = j;
360
361 if (sta->ht_cap.ht_supported) {
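		/* 802.11n defines MCS indices 0-76; collect the ones set
		 * in the station's RX MCS bitmask, up to ATH_HTC_RATE_MAX */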
362 for (i = 0, j = 0; i < 77; i++) {
363 if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
364 priv->tgt_rate.rates.ht_rates.rs_rates[j++] = i;
365 if (j == ATH_HTC_RATE_MAX)
366 break;
367 }
368 priv->tgt_rate.rates.ht_rates.rs_nrates = j;
369
370 caps = WLAN_RC_HT_FLAG;
371 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40)
372 caps |= WLAN_RC_40_FLAG;
373 if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
374 caps |= WLAN_RC_SGI_FLAG;
375
376 }
377
378 priv->tgt_rate.sta_index = ista->index;
379 priv->tgt_rate.isnew = 1;
380 trate = priv->tgt_rate;
381 priv->tgt_rate.capflags = caps;
382 trate.capflags = cpu_to_be32(caps);
383
384 WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
385 if (ret) {
386 ath_print(common, ATH_DBG_FATAL,
387			  "Unable to initialize rate information on target\n");
388 return ret;
389 }
390
391 ath_print(common, ATH_DBG_CONFIG,
392 "Updated target STA: %pM (caps: 0x%x)\n", sta->addr, caps);
393 return 0;
394}
395
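/*
 * Report whether the target's rate-control caps need a refresh: only
 * when associated and not scanning, and only if the configured
 * HT20/HT40 width no longer matches the current channel mode.
 */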
396static bool check_rc_update(struct ieee80211_hw *hw, bool *cw40)
397{
398 struct ath9k_htc_priv *priv = hw->priv;
399 struct ieee80211_conf *conf = &hw->conf;
400
401 if (!conf_is_ht(conf))
402 return false;
403
404 if (!(priv->op_flags & OP_ASSOCIATED) ||
405 (priv->op_flags & OP_SCANNING))
406 return false;
407
408 if (conf_is_ht40(conf)) {
409 if (priv->ah->curchan->chanmode &
410 (CHANNEL_HT40PLUS | CHANNEL_HT40MINUS)) {
411 return false;
412 } else {
413 *cw40 = true;
414 return true;
415 }
416 } else { /* ht20 */
417 if (priv->ah->curchan->chanmode & CHANNEL_HT20)
418 return false;
419 else
420 return true;
421 }
422}
423
424static void ath9k_htc_rc_update(struct ath9k_htc_priv *priv, bool is_cw40)
425{
426 struct ath9k_htc_target_rate trate;
427 struct ath_common *common = ath9k_hw_common(priv->ah);
428 int ret;
429 u8 cmd_rsp;
430
431 memset(&trate, 0, sizeof(trate));
432
433 trate = priv->tgt_rate;
434
435 if (is_cw40)
436 priv->tgt_rate.capflags |= WLAN_RC_40_FLAG;
437 else
438 priv->tgt_rate.capflags &= ~WLAN_RC_40_FLAG;
439
440 trate.capflags = cpu_to_be32(priv->tgt_rate.capflags);
441
442 WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, &trate);
443 if (ret) {
444 ath_print(common, ATH_DBG_FATAL,
445			  "Unable to update rate information on target\n");
446 return;
447 }
448
449 ath_print(common, ATH_DBG_CONFIG, "Rate control updated with "
450		  "caps: 0x%x on target\n", priv->tgt_rate.capflags);
451}
452
453static int ath9k_htc_aggr_oper(struct ath9k_htc_priv *priv,
454 struct ieee80211_vif *vif,
455 u8 *sta_addr, u8 tid, bool oper)
456{
457 struct ath_common *common = ath9k_hw_common(priv->ah);
458 struct ath9k_htc_target_aggr aggr;
459 struct ieee80211_sta *sta = NULL;
460	struct ath9k_htc_sta *ista; /* resolved after ieee80211_find_sta() */
461 int ret = 0;
462 u8 cmd_rsp;
463
464 if (tid > ATH9K_HTC_MAX_TID)
465 return -EINVAL;
466
467 memset(&aggr, 0, sizeof(struct ath9k_htc_target_aggr));
468
469 rcu_read_lock();
470
471 /* Check if we are able to retrieve the station */
472 sta = ieee80211_find_sta(vif, sta_addr);
473 if (!sta) {
474 rcu_read_unlock();
475 return -EINVAL;
476 }
477
478 ista = (struct ath9k_htc_sta *) sta->drv_priv;
479
480 if (oper)
481 ista->tid_state[tid] = AGGR_START;
482 else
483 ista->tid_state[tid] = AGGR_STOP;
484
485 aggr.sta_index = ista->index;
486
487 rcu_read_unlock();
488
489 aggr.tidno = tid;
490 aggr.aggr_enable = oper;
491
492 WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
493 if (ret)
494 ath_print(common, ATH_DBG_CONFIG,
495 "Unable to %s TX aggregation for (%pM, %d)\n",
496 (oper) ? "start" : "stop", sta->addr, tid);
497 else
498 ath_print(common, ATH_DBG_CONFIG,
499 "%s aggregation for (%pM, %d)\n",
500 (oper) ? "Starting" : "Stopping", sta->addr, tid);
501
502 return ret;
503}
504
505void ath9k_htc_aggr_work(struct work_struct *work)
506{
507 int ret = 0;
508 struct ath9k_htc_priv *priv =
509 container_of(work, struct ath9k_htc_priv,
510 ath9k_aggr_work.work);
511 struct ath9k_htc_aggr_work *wk = &priv->aggr_work;
512
513 mutex_lock(&wk->mutex);
514
515 switch (wk->action) {
516 case IEEE80211_AMPDU_TX_START:
517 ret = ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
518 wk->tid, true);
519 if (!ret)
520 ieee80211_start_tx_ba_cb(wk->vif, wk->sta_addr,
521 wk->tid);
522 break;
523 case IEEE80211_AMPDU_TX_STOP:
524 ath9k_htc_aggr_oper(priv, wk->vif, wk->sta_addr,
525 wk->tid, false);
526 ieee80211_stop_tx_ba_cb(wk->vif, wk->sta_addr, wk->tid);
527 break;
528 default:
529 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
530 "Unknown AMPDU action\n");
531 }
532
533 mutex_unlock(&wk->mutex);
534}
535
536/*********/
537/* DEBUG */
538/*********/
539
540#ifdef CONFIG_ATH9K_HTC_DEBUGFS
541
542static int ath9k_debugfs_open(struct inode *inode, struct file *file)
543{
544 file->private_data = inode->i_private;
545 return 0;
546}
547
548static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
549 size_t count, loff_t *ppos)
550{
551 struct ath9k_htc_priv *priv =
552 (struct ath9k_htc_priv *) file->private_data;
553 struct ath9k_htc_target_stats cmd_rsp;
554 char buf[512];
555 unsigned int len = 0;
556 int ret = 0;
557
558 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
559
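	/* Fetch the statistics block from the target firmware; the
	 * values arrive big-endian, hence the be32_to_cpu() below. */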
560 WMI_CMD(WMI_TGT_STATS_CMDID);
561 if (ret)
562 return -EINVAL;
563
564
565 len += snprintf(buf + len, sizeof(buf) - len,
566 "%19s : %10u\n", "TX Short Retries",
567 be32_to_cpu(cmd_rsp.tx_shortretry));
568 len += snprintf(buf + len, sizeof(buf) - len,
569 "%19s : %10u\n", "TX Long Retries",
570 be32_to_cpu(cmd_rsp.tx_longretry));
571 len += snprintf(buf + len, sizeof(buf) - len,
572 "%19s : %10u\n", "TX Xretries",
573 be32_to_cpu(cmd_rsp.tx_xretries));
574 len += snprintf(buf + len, sizeof(buf) - len,
575 "%19s : %10u\n", "TX Unaggr. Xretries",
576 be32_to_cpu(cmd_rsp.ht_txunaggr_xretry));
577 len += snprintf(buf + len, sizeof(buf) - len,
578 "%19s : %10u\n", "TX Xretries (HT)",
579 be32_to_cpu(cmd_rsp.ht_tx_xretries));
580 len += snprintf(buf + len, sizeof(buf) - len,
581 "%19s : %10u\n", "TX Rate", priv->debug.txrate);
582
583 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
584}
585
586static const struct file_operations fops_tgt_stats = {
587 .read = read_file_tgt_stats,
588 .open = ath9k_debugfs_open,
589 .owner = THIS_MODULE
590};
591
592static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
593 size_t count, loff_t *ppos)
594{
595 struct ath9k_htc_priv *priv =
596 (struct ath9k_htc_priv *) file->private_data;
597 char buf[512];
598 unsigned int len = 0;
599
600 len += snprintf(buf + len, sizeof(buf) - len,
601 "%20s : %10u\n", "Buffers queued",
602 priv->debug.tx_stats.buf_queued);
603 len += snprintf(buf + len, sizeof(buf) - len,
604 "%20s : %10u\n", "Buffers completed",
605 priv->debug.tx_stats.buf_completed);
606 len += snprintf(buf + len, sizeof(buf) - len,
607 "%20s : %10u\n", "SKBs queued",
608 priv->debug.tx_stats.skb_queued);
609 len += snprintf(buf + len, sizeof(buf) - len,
610 "%20s : %10u\n", "SKBs completed",
611 priv->debug.tx_stats.skb_completed);
612
613 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
614}
615
616static const struct file_operations fops_xmit = {
617 .read = read_file_xmit,
618 .open = ath9k_debugfs_open,
619 .owner = THIS_MODULE
620};
621
622static ssize_t read_file_recv(struct file *file, char __user *user_buf,
623 size_t count, loff_t *ppos)
624{
625 struct ath9k_htc_priv *priv =
626 (struct ath9k_htc_priv *) file->private_data;
627 char buf[512];
628 unsigned int len = 0;
629
630 len += snprintf(buf + len, sizeof(buf) - len,
631 "%20s : %10u\n", "SKBs allocated",
632 priv->debug.rx_stats.skb_allocated);
633 len += snprintf(buf + len, sizeof(buf) - len,
634 "%20s : %10u\n", "SKBs completed",
635 priv->debug.rx_stats.skb_completed);
636 len += snprintf(buf + len, sizeof(buf) - len,
637 "%20s : %10u\n", "SKBs Dropped",
638 priv->debug.rx_stats.skb_dropped);
639
640 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
641}
642
643static const struct file_operations fops_recv = {
644 .read = read_file_recv,
645 .open = ath9k_debugfs_open,
646 .owner = THIS_MODULE
647};
648
649int ath9k_htc_init_debug(struct ath_hw *ah)
650{
651 struct ath_common *common = ath9k_hw_common(ah);
652 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
653
654 if (!ath9k_debugfs_root)
655 return -ENOENT;
656
657 priv->debug.debugfs_phy = debugfs_create_dir(wiphy_name(priv->hw->wiphy),
658 ath9k_debugfs_root);
659 if (!priv->debug.debugfs_phy)
660 goto err;
661
662 priv->debug.debugfs_tgt_stats = debugfs_create_file("tgt_stats", S_IRUSR,
663 priv->debug.debugfs_phy,
664 priv, &fops_tgt_stats);
665 if (!priv->debug.debugfs_tgt_stats)
666 goto err;
667
668
669 priv->debug.debugfs_xmit = debugfs_create_file("xmit", S_IRUSR,
670 priv->debug.debugfs_phy,
671 priv, &fops_xmit);
672 if (!priv->debug.debugfs_xmit)
673 goto err;
674
675 priv->debug.debugfs_recv = debugfs_create_file("recv", S_IRUSR,
676 priv->debug.debugfs_phy,
677 priv, &fops_recv);
678 if (!priv->debug.debugfs_recv)
679 goto err;
680
681 return 0;
682
683err:
684 ath9k_htc_exit_debug(ah);
685 return -ENOMEM;
686}
687
688void ath9k_htc_exit_debug(struct ath_hw *ah)
689{
690 struct ath_common *common = ath9k_hw_common(ah);
691 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
692
693 debugfs_remove(priv->debug.debugfs_recv);
694 debugfs_remove(priv->debug.debugfs_xmit);
695 debugfs_remove(priv->debug.debugfs_tgt_stats);
696 debugfs_remove(priv->debug.debugfs_phy);
697}
698
699int ath9k_htc_debug_create_root(void)
700{
701 ath9k_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
702 if (!ath9k_debugfs_root)
703 return -ENOENT;
704
705 return 0;
706}
707
708void ath9k_htc_debug_remove_root(void)
709{
710 debugfs_remove(ath9k_debugfs_root);
711 ath9k_debugfs_root = NULL;
712}
713
714#endif /* CONFIG_ATH9K_HTC_DEBUGFS */
715
716/*******/
717/* ANI */
718/*******/
719
720static void ath_start_ani(struct ath9k_htc_priv *priv)
721{
722 struct ath_common *common = ath9k_hw_common(priv->ah);
723 unsigned long timestamp = jiffies_to_msecs(jiffies);
724
725 common->ani.longcal_timer = timestamp;
726 common->ani.shortcal_timer = timestamp;
727 common->ani.checkani_timer = timestamp;
728
729 ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
730 msecs_to_jiffies(ATH_ANI_POLLINTERVAL));
731}
732
733void ath9k_ani_work(struct work_struct *work)
734{
735 struct ath9k_htc_priv *priv =
736 container_of(work, struct ath9k_htc_priv,
737 ath9k_ani_work.work);
738 struct ath_hw *ah = priv->ah;
739 struct ath_common *common = ath9k_hw_common(ah);
740 bool longcal = false;
741 bool shortcal = false;
742 bool aniflag = false;
743 unsigned int timestamp = jiffies_to_msecs(jiffies);
744 u32 cal_interval, short_cal_interval;
745
746 short_cal_interval = ATH_STA_SHORT_CALINTERVAL;
747
748 /* Only calibrate if awake */
749 if (ah->power_mode != ATH9K_PM_AWAKE)
750 goto set_timer;
751
752 /* Long calibration runs independently of short calibration. */
753 if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
754 longcal = true;
755 ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
756 common->ani.longcal_timer = timestamp;
757 }
758
759 /* Short calibration applies only while caldone is false */
760 if (!common->ani.caldone) {
761 if ((timestamp - common->ani.shortcal_timer) >=
762 short_cal_interval) {
763 shortcal = true;
764 ath_print(common, ATH_DBG_ANI,
765 "shortcal @%lu\n", jiffies);
766 common->ani.shortcal_timer = timestamp;
767 common->ani.resetcal_timer = timestamp;
768 }
769 } else {
770 if ((timestamp - common->ani.resetcal_timer) >=
771 ATH_RESTART_CALINTERVAL) {
772 common->ani.caldone = ath9k_hw_reset_calvalid(ah);
773 if (common->ani.caldone)
774 common->ani.resetcal_timer = timestamp;
775 }
776 }
777
778 /* Verify whether we must check ANI */
779 if ((timestamp - common->ani.checkani_timer) >= ATH_ANI_POLLINTERVAL) {
780 aniflag = true;
781 common->ani.checkani_timer = timestamp;
782 }
783
784 /* Skip all processing if there's nothing to do. */
785 if (longcal || shortcal || aniflag) {
786
787 ath9k_htc_ps_wakeup(priv);
788
789 /* Call ANI routine if necessary */
790 if (aniflag)
791 ath9k_hw_ani_monitor(ah, ah->curchan);
792
793 /* Perform calibration if necessary */
794 if (longcal || shortcal) {
795 common->ani.caldone =
796 ath9k_hw_calibrate(ah, ah->curchan,
797 common->rx_chainmask,
798 longcal);
799
800 if (longcal)
801 common->ani.noise_floor =
802 ath9k_hw_getchan_noise(ah, ah->curchan);
803
804 ath_print(common, ATH_DBG_ANI,
805 " calibrate chan %u/%x nf: %d\n",
806 ah->curchan->channel,
807 ah->curchan->channelFlags,
808 common->ani.noise_floor);
809 }
810
811 ath9k_htc_ps_restore(priv);
812 }
813
814set_timer:
815 /*
816 * Set timer interval based on previous results.
817 * The interval must be the shortest necessary to satisfy ANI,
818 * short calibration and long calibration.
819 */
820 cal_interval = ATH_LONG_CALINTERVAL;
821 if (priv->ah->config.enable_ani)
822 cal_interval = min(cal_interval, (u32)ATH_ANI_POLLINTERVAL);
823 if (!common->ani.caldone)
824 cal_interval = min(cal_interval, (u32)short_cal_interval);
825
826 ieee80211_queue_delayed_work(common->hw, &priv->ath9k_ani_work,
827 msecs_to_jiffies(cal_interval));
828}
829
830/*******/
831/* LED */
832/*******/
833
834static void ath9k_led_blink_work(struct work_struct *work)
835{
836 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
837 ath9k_led_blink_work.work);
838
839 if (!(priv->op_flags & OP_LED_ASSOCIATED))
840 return;
841
842 if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
843 (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
844 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
845 else
846 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
847 (priv->op_flags & OP_LED_ON) ? 1 : 0);
848
849 ieee80211_queue_delayed_work(priv->hw,
850 &priv->ath9k_led_blink_work,
851 (priv->op_flags & OP_LED_ON) ?
852 msecs_to_jiffies(priv->led_off_duration) :
853 msecs_to_jiffies(priv->led_on_duration));
854
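	/* Derive the next on/off periods from tx/rx activity: busier
	 * periods shorten the idle durations, clamped to 25/10 ms. */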
855 priv->led_on_duration = priv->led_on_cnt ?
856 max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
857 ATH_LED_ON_DURATION_IDLE;
858 priv->led_off_duration = priv->led_off_cnt ?
859 max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
860 ATH_LED_OFF_DURATION_IDLE;
861 priv->led_on_cnt = priv->led_off_cnt = 0;
862
863 if (priv->op_flags & OP_LED_ON)
864 priv->op_flags &= ~OP_LED_ON;
865 else
866 priv->op_flags |= OP_LED_ON;
867}
868
869static void ath9k_led_brightness_work(struct work_struct *work)
870{
871 struct ath_led *led = container_of(work, struct ath_led,
872 brightness_work.work);
873 struct ath9k_htc_priv *priv = led->priv;
874
875 switch (led->brightness) {
876 case LED_OFF:
877 if (led->led_type == ATH_LED_ASSOC ||
878 led->led_type == ATH_LED_RADIO) {
879 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
880 (led->led_type == ATH_LED_RADIO));
881 priv->op_flags &= ~OP_LED_ASSOCIATED;
882 if (led->led_type == ATH_LED_RADIO)
883 priv->op_flags &= ~OP_LED_ON;
884 } else {
885 priv->led_off_cnt++;
886 }
887 break;
888 case LED_FULL:
889 if (led->led_type == ATH_LED_ASSOC) {
890 priv->op_flags |= OP_LED_ASSOCIATED;
891 ieee80211_queue_delayed_work(priv->hw,
892 &priv->ath9k_led_blink_work, 0);
893 } else if (led->led_type == ATH_LED_RADIO) {
894 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
895 priv->op_flags |= OP_LED_ON;
896 } else {
897 priv->led_on_cnt++;
898 }
899 break;
900 default:
901 break;
902 }
903}
904
905static void ath9k_led_brightness(struct led_classdev *led_cdev,
906 enum led_brightness brightness)
907{
908 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
909 struct ath9k_htc_priv *priv = led->priv;
910
911 led->brightness = brightness;
912 if (!(priv->op_flags & OP_LED_DEINIT))
913 ieee80211_queue_delayed_work(priv->hw,
914 &led->brightness_work, 0);
915}
916
917static void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
918{
919 cancel_delayed_work_sync(&priv->radio_led.brightness_work);
920 cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
921 cancel_delayed_work_sync(&priv->tx_led.brightness_work);
922 cancel_delayed_work_sync(&priv->rx_led.brightness_work);
923}
924
925static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
926 char *trigger)
927{
928 int ret;
929
930 led->priv = priv;
931 led->led_cdev.name = led->name;
932 led->led_cdev.default_trigger = trigger;
933 led->led_cdev.brightness_set = ath9k_led_brightness;
934
935 ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev);
936 if (ret)
937 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
938			  "Failed to register led: %s\n", led->name);
939 else
940 led->registered = 1;
941
942 INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
943
944 return ret;
945}
946
947static void ath9k_unregister_led(struct ath_led *led)
948{
949 if (led->registered) {
950 led_classdev_unregister(&led->led_cdev);
951 led->registered = 0;
952 }
953}
954
955void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
956{
957 priv->op_flags |= OP_LED_DEINIT;
958 ath9k_unregister_led(&priv->assoc_led);
959 priv->op_flags &= ~OP_LED_ASSOCIATED;
960 ath9k_unregister_led(&priv->tx_led);
961 ath9k_unregister_led(&priv->rx_led);
962 ath9k_unregister_led(&priv->radio_led);
963 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
964}
965
966void ath9k_init_leds(struct ath9k_htc_priv *priv)
967{
968 char *trigger;
969 int ret;
970
971 if (AR_SREV_9287(priv->ah))
972 priv->ah->led_pin = ATH_LED_PIN_9287;
973 else if (AR_SREV_9271(priv->ah))
974 priv->ah->led_pin = ATH_LED_PIN_9271;
975 else
976 priv->ah->led_pin = ATH_LED_PIN_DEF;
977
978 /* Configure gpio 1 for output */
979 ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
980 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
981 /* LED off, active low */
982 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
983
984 INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work);
985
986 trigger = ieee80211_get_radio_led_name(priv->hw);
987 snprintf(priv->radio_led.name, sizeof(priv->radio_led.name),
988 "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
989 ret = ath9k_register_led(priv, &priv->radio_led, trigger);
990 priv->radio_led.led_type = ATH_LED_RADIO;
991 if (ret)
992 goto fail;
993
994 trigger = ieee80211_get_assoc_led_name(priv->hw);
995 snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
996 "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
997 ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
998 priv->assoc_led.led_type = ATH_LED_ASSOC;
999 if (ret)
1000 goto fail;
1001
1002 trigger = ieee80211_get_tx_led_name(priv->hw);
1003 snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
1004 "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
1005 ret = ath9k_register_led(priv, &priv->tx_led, trigger);
1006 priv->tx_led.led_type = ATH_LED_TX;
1007 if (ret)
1008 goto fail;
1009
1010 trigger = ieee80211_get_rx_led_name(priv->hw);
1011 snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
1012 "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
1013 ret = ath9k_register_led(priv, &priv->rx_led, trigger);
1014 priv->rx_led.led_type = ATH_LED_RX;
1015 if (ret)
1016 goto fail;
1017
1018 priv->op_flags &= ~OP_LED_DEINIT;
1019
1020 return;
1021
1022fail:
1023 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1024 ath9k_deinit_leds(priv);
1025}
1026
1027/*******************/
1028/* Rfkill */
1029/*******************/
1030
1031static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
1032{
1033 return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
1034 priv->ah->rfkill_polarity;
1035}
1036
1037static void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
1038{
1039 struct ath9k_htc_priv *priv = hw->priv;
1040 bool blocked = !!ath_is_rfkill_set(priv);
1041
1042 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1043}
1044
1045void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
1046{
1047 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1048 wiphy_rfkill_start_polling(priv->hw->wiphy);
1049}
1050
1051/**********************/
1052/* mac80211 Callbacks */
1053/**********************/
1054
1055static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1056{
1057 struct ieee80211_hdr *hdr;
1058 struct ath9k_htc_priv *priv = hw->priv;
1059 int padpos, padsize, ret;
1060
1061 hdr = (struct ieee80211_hdr *) skb->data;
1062
1063 /* Add the padding after the header if this is not already done */
1064 padpos = ath9k_cmn_padpos(hdr->frame_control);
1065 padsize = padpos & 3;
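	/* e.g. a 26-byte QoS data header yields padsize = 2, so the
	 * payload that follows starts on a 4-byte boundary */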
1066 if (padsize && skb->len > padpos) {
1067 if (skb_headroom(skb) < padsize)
1068 return -1;
1069 skb_push(skb, padsize);
1070 memmove(skb->data, skb->data + padsize, padpos);
1071 }
1072
1073 ret = ath9k_htc_tx_start(priv, skb);
1074 if (ret != 0) {
1075 if (ret == -ENOMEM) {
1076 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
1077 "Stopping TX queues\n");
1078 ieee80211_stop_queues(hw);
1079 spin_lock_bh(&priv->tx_lock);
1080 priv->tx_queues_stop = true;
1081 spin_unlock_bh(&priv->tx_lock);
1082 } else {
1083 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
1084				  "Tx failed\n");
1085 }
1086 goto fail_tx;
1087 }
1088
1089 return 0;
1090
1091fail_tx:
1092 dev_kfree_skb_any(skb);
1093 return 0;
1094}
1095
1096static int ath9k_htc_start(struct ieee80211_hw *hw)
1097{
1098 struct ath9k_htc_priv *priv = hw->priv;
1099 struct ath_hw *ah = priv->ah;
1100 struct ath_common *common = ath9k_hw_common(ah);
1101 struct ieee80211_channel *curchan = hw->conf.channel;
1102 struct ath9k_channel *init_channel;
1103 int ret = 0;
1104 enum htc_phymode mode;
1105 u16 htc_mode;
1106 u8 cmd_rsp;
1107
1108 ath_print(common, ATH_DBG_CONFIG,
1109 "Starting driver with initial channel: %d MHz\n",
1110 curchan->center_freq);
1111
1112 mutex_lock(&priv->mutex);
1113
1114 /* setup initial channel */
1115 init_channel = ath9k_cmn_get_curchannel(hw, ah);
1116
1117 /* Reset SERDES registers */
1118 ath9k_hw_configpcipowersave(ah, 0, 0);
1119
1120 ath9k_hw_htc_resetinit(ah);
1121 ret = ath9k_hw_reset(ah, init_channel, false);
1122 if (ret) {
1123 ath_print(common, ATH_DBG_FATAL,
1124 "Unable to reset hardware; reset status %d "
1125 "(freq %u MHz)\n", ret, curchan->center_freq);
1126 goto mutex_unlock;
1127 }
1128
1129 ath_update_txpow(priv);
1130
1131 mode = ath9k_htc_get_curmode(priv, init_channel);
1132 htc_mode = cpu_to_be16(mode);
1133 WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
1134 if (ret)
1135 goto mutex_unlock;
1136
1137 WMI_CMD(WMI_ATH_INIT_CMDID);
1138 if (ret)
1139 goto mutex_unlock;
1140
1141 WMI_CMD(WMI_START_RECV_CMDID);
1142 if (ret)
1143 goto mutex_unlock;
1144
1145 ath9k_host_rx_init(priv);
1146
1147 priv->op_flags &= ~OP_INVALID;
1148 htc_start(priv->htc);
1149
1150 spin_lock_bh(&priv->tx_lock);
1151 priv->tx_queues_stop = false;
1152 spin_unlock_bh(&priv->tx_lock);
1153
1154 ieee80211_wake_queues(hw);
1155
1156mutex_unlock:
1157 mutex_unlock(&priv->mutex);
1158 return ret;
1159}
1160
1161static void ath9k_htc_stop(struct ieee80211_hw *hw)
1162{
1163 struct ath9k_htc_priv *priv = hw->priv;
1164 struct ath_hw *ah = priv->ah;
1165 struct ath_common *common = ath9k_hw_common(ah);
1166 int ret = 0;
1167 u8 cmd_rsp;
1168
1169 mutex_lock(&priv->mutex);
1170
1171 if (priv->op_flags & OP_INVALID) {
1172 ath_print(common, ATH_DBG_ANY, "Device not present\n");
1173 mutex_unlock(&priv->mutex);
1174 return;
1175 }
1176
1177 ath9k_htc_ps_wakeup(priv);
1178 htc_stop(priv->htc);
1179 WMI_CMD(WMI_DISABLE_INTR_CMDID);
1180 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
1181 WMI_CMD(WMI_STOP_RECV_CMDID);
1182 ath9k_hw_phy_disable(ah);
1183 ath9k_hw_disable(ah);
1184 ath9k_hw_configpcipowersave(ah, 1, 1);
1185 ath9k_htc_ps_restore(priv);
1186 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
1187
1188 cancel_work_sync(&priv->ps_work);
1189 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1190 cancel_delayed_work_sync(&priv->ath9k_aggr_work);
1191 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
1192 ath9k_led_stop_brightness(priv);
1193 skb_queue_purge(&priv->tx_queue);
1194
1195 /* Remove monitor interface here */
1196 if (ah->opmode == NL80211_IFTYPE_MONITOR) {
1197 if (ath9k_htc_remove_monitor_interface(priv))
1198 ath_print(common, ATH_DBG_FATAL,
1199 "Unable to remove monitor interface\n");
1200 else
1201 ath_print(common, ATH_DBG_CONFIG,
1202 "Monitor interface removed\n");
1203 }
1204
1205 priv->op_flags |= OP_INVALID;
1206 mutex_unlock(&priv->mutex);
1207
1208 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
1209}
1210
1211static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
1212 struct ieee80211_vif *vif)
1213{
1214 struct ath9k_htc_priv *priv = hw->priv;
1215 struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
1216 struct ath_common *common = ath9k_hw_common(priv->ah);
1217 struct ath9k_htc_target_vif hvif;
1218 int ret = 0;
1219 u8 cmd_rsp;
1220
1221 mutex_lock(&priv->mutex);
1222
1223 /* Only one interface for now */
1224 if (priv->nvifs > 0) {
1225 ret = -ENOBUFS;
1226 goto out;
1227 }
1228
1229 ath9k_htc_ps_wakeup(priv);
1230 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
1231 memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
1232
1233 switch (vif->type) {
1234 case NL80211_IFTYPE_STATION:
1235 hvif.opmode = cpu_to_be32(HTC_M_STA);
1236 break;
1237 case NL80211_IFTYPE_ADHOC:
1238 hvif.opmode = cpu_to_be32(HTC_M_IBSS);
1239 break;
1240 default:
1241 ath_print(common, ATH_DBG_FATAL,
1242 "Interface type %d not yet supported\n", vif->type);
1243 ret = -EOPNOTSUPP;
1244 goto out;
1245 }
1246
1247 ath_print(common, ATH_DBG_CONFIG,
1248 "Attach a VIF of type: %d\n", vif->type);
1249
1250 priv->ah->opmode = vif->type;
1251
1252 /* Index starts from zero on the target */
1253 avp->index = hvif.index = priv->nvifs;
1254 hvif.rtsthreshold = cpu_to_be16(2304);
1255 WMI_CMD_BUF(WMI_VAP_CREATE_CMDID, &hvif);
1256 if (ret)
1257 goto out;
1258
1259 priv->nvifs++;
1260
1261 /*
1262 * We need a node in target to tx mgmt frames
1263 * before association.
1264 */
1265 ret = ath9k_htc_add_station(priv, vif, NULL);
1266 if (ret)
1267 goto out;
1268
1269 ret = ath9k_htc_update_cap_target(priv);
1270 if (ret)
1271		ath_print(common, ATH_DBG_CONFIG,
1272			  "Failed to update capability in target\n");
1273
1274 priv->vif = vif;
1275out:
1276 ath9k_htc_ps_restore(priv);
1277 mutex_unlock(&priv->mutex);
1278 return ret;
1279}
1280
1281static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
1282 struct ieee80211_vif *vif)
1283{
1284 struct ath9k_htc_priv *priv = hw->priv;
1285 struct ath_common *common = ath9k_hw_common(priv->ah);
1286 struct ath9k_htc_vif *avp = (void *)vif->drv_priv;
1287 struct ath9k_htc_target_vif hvif;
1288 int ret = 0;
1289 u8 cmd_rsp;
1290
1291 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
1292
1293 mutex_lock(&priv->mutex);
1294
1295 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
1296 memcpy(&hvif.myaddr, vif->addr, ETH_ALEN);
1297 hvif.index = avp->index;
1298 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
1299 priv->nvifs--;
1300
1301 ath9k_htc_remove_station(priv, vif, NULL);
1302
1303 if (vif->type == NL80211_IFTYPE_ADHOC) {
1304 spin_lock_bh(&priv->beacon_lock);
1305 if (priv->beacon)
1306 dev_kfree_skb_any(priv->beacon);
1307 priv->beacon = NULL;
1308 spin_unlock_bh(&priv->beacon_lock);
1309 }
1310
1311 priv->vif = NULL;
1312
1313 mutex_unlock(&priv->mutex);
1314}
1315
1316static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1317{
1318 struct ath9k_htc_priv *priv = hw->priv;
1319 struct ath_common *common = ath9k_hw_common(priv->ah);
1320 struct ieee80211_conf *conf = &hw->conf;
1321
1322 mutex_lock(&priv->mutex);
1323
1324 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1325 struct ieee80211_channel *curchan = hw->conf.channel;
1326 int pos = curchan->hw_value;
1327 bool is_cw40 = false;
1328
1329 ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
1330 curchan->center_freq);
1331
1332 if (check_rc_update(hw, &is_cw40))
1333 ath9k_htc_rc_update(priv, is_cw40);
1334
1335 ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]);
1336
1337 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
1338 ath_print(common, ATH_DBG_FATAL,
1339 "Unable to set channel\n");
1340 mutex_unlock(&priv->mutex);
1341 return -EINVAL;
1342 }
1343
1344 }
1345 if (changed & IEEE80211_CONF_CHANGE_PS) {
1346 if (conf->flags & IEEE80211_CONF_PS) {
1347 ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
1348 priv->ps_enabled = true;
1349 } else {
1350 priv->ps_enabled = false;
1351 cancel_work_sync(&priv->ps_work);
1352 ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
1353 }
1354 }
1355
1356 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1357 if (conf->flags & IEEE80211_CONF_MONITOR) {
1358 if (ath9k_htc_add_monitor_interface(priv))
1359 ath_print(common, ATH_DBG_FATAL,
1360 "Failed to set monitor mode\n");
1361 else
1362 ath_print(common, ATH_DBG_CONFIG,
1363 "HW opmode set to Monitor mode\n");
1364 }
1365 }
1366
1367 mutex_unlock(&priv->mutex);
1368
1369 return 0;
1370}
1371
1372#define SUPPORTED_FILTERS \
1373 (FIF_PROMISC_IN_BSS | \
1374 FIF_ALLMULTI | \
1375 FIF_CONTROL | \
1376 FIF_PSPOLL | \
1377 FIF_OTHER_BSS | \
1378 FIF_BCN_PRBRESP_PROMISC | \
1379 FIF_FCSFAIL)
1380
1381static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1382 unsigned int changed_flags,
1383 unsigned int *total_flags,
1384 u64 multicast)
1385{
1386 struct ath9k_htc_priv *priv = hw->priv;
1387 u32 rfilt;
1388
1389 mutex_lock(&priv->mutex);
1390
1391 ath9k_htc_ps_wakeup(priv);
1392 changed_flags &= SUPPORTED_FILTERS;
1393 *total_flags &= SUPPORTED_FILTERS;
1394
1395 priv->rxfilter = *total_flags;
1396 rfilt = ath9k_htc_calcrxfilter(priv);
1397 ath9k_hw_setrxfilter(priv->ah, rfilt);
1398
1399 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
1400 "Set HW RX filter: 0x%x\n", rfilt);
1401
1402 ath9k_htc_ps_restore(priv);
1403 mutex_unlock(&priv->mutex);
1404}
1405
1406static void ath9k_htc_sta_notify(struct ieee80211_hw *hw,
1407 struct ieee80211_vif *vif,
1408 enum sta_notify_cmd cmd,
1409 struct ieee80211_sta *sta)
1410{
1411 struct ath9k_htc_priv *priv = hw->priv;
1412 int ret;
1413
1414 switch (cmd) {
1415 case STA_NOTIFY_ADD:
1416 ret = ath9k_htc_add_station(priv, vif, sta);
1417 if (!ret)
1418 ath9k_htc_init_rate(priv, vif, sta);
1419 break;
1420 case STA_NOTIFY_REMOVE:
1421 ath9k_htc_remove_station(priv, vif, sta);
1422 break;
1423 default:
1424 break;
1425 }
1426}
1427
1428static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
1429 const struct ieee80211_tx_queue_params *params)
1430{
1431 struct ath9k_htc_priv *priv = hw->priv;
1432 struct ath_common *common = ath9k_hw_common(priv->ah);
1433 struct ath9k_tx_queue_info qi;
1434 int ret = 0, qnum;
1435
1436 if (queue >= WME_NUM_AC)
1437 return 0;
1438
1439 mutex_lock(&priv->mutex);
1440
1441 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
1442
1443 qi.tqi_aifs = params->aifs;
1444 qi.tqi_cwmin = params->cw_min;
1445 qi.tqi_cwmax = params->cw_max;
1446 qi.tqi_burstTime = params->txop;
1447
1448 qnum = get_hw_qnum(queue, priv->hwq_map);
1449
1450 ath_print(common, ATH_DBG_CONFIG,
1451 "Configure tx [queue/hwq] [%d/%d], "
1452 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
1453 queue, qnum, params->aifs, params->cw_min,
1454 params->cw_max, params->txop);
1455
1456 ret = ath_htc_txq_update(priv, qnum, &qi);
1457 if (ret)
1458 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
1459
1460 mutex_unlock(&priv->mutex);
1461
1462 return ret;
1463}
1464
1465static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1466 enum set_key_cmd cmd,
1467 struct ieee80211_vif *vif,
1468 struct ieee80211_sta *sta,
1469 struct ieee80211_key_conf *key)
1470{
1471 struct ath9k_htc_priv *priv = hw->priv;
1472 struct ath_common *common = ath9k_hw_common(priv->ah);
1473 int ret = 0;
1474
1475 if (htc_modparam_nohwcrypt)
1476 return -ENOSPC;
1477
1478 mutex_lock(&priv->mutex);
1479 ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
1480 ath9k_htc_ps_wakeup(priv);
1481
1482 switch (cmd) {
1483 case SET_KEY:
1484 ret = ath9k_cmn_key_config(common, vif, sta, key);
1485 if (ret >= 0) {
1486 key->hw_key_idx = ret;
1487 /* push IV and Michael MIC generation to stack */
1488 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1489 if (key->alg == ALG_TKIP)
1490 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
1491 if (priv->ah->sw_mgmt_crypto && key->alg == ALG_CCMP)
1492 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
1493 ret = 0;
1494 }
1495 break;
1496 case DISABLE_KEY:
1497 ath9k_cmn_key_delete(common, key);
1498 break;
1499 default:
1500 ret = -EINVAL;
1501 }
1502
1503 ath9k_htc_ps_restore(priv);
1504 mutex_unlock(&priv->mutex);
1505
1506 return ret;
1507}
1508
1509static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1510 struct ieee80211_vif *vif,
1511 struct ieee80211_bss_conf *bss_conf,
1512 u32 changed)
1513{
1514 struct ath9k_htc_priv *priv = hw->priv;
1515 struct ath_hw *ah = priv->ah;
1516 struct ath_common *common = ath9k_hw_common(ah);
1517
1518 mutex_lock(&priv->mutex);
1519 ath9k_htc_ps_wakeup(priv);
1520
1521 if (changed & BSS_CHANGED_ASSOC) {
1522 common->curaid = bss_conf->assoc ?
1523 bss_conf->aid : 0;
1524 ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
1525 bss_conf->assoc);
1526
1527 if (bss_conf->assoc) {
1528 priv->op_flags |= OP_ASSOCIATED;
1529 ath_start_ani(priv);
1530 } else {
1531 priv->op_flags &= ~OP_ASSOCIATED;
1532 cancel_work_sync(&priv->ps_work);
1533 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1534 }
1535 }
1536
1537 if (changed & BSS_CHANGED_BSSID) {
1538 /* Set BSSID */
1539 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1540 ath9k_hw_write_associd(ah);
1541
1542 ath_print(common, ATH_DBG_CONFIG,
1543 "BSSID: %pM aid: 0x%x\n",
1544 common->curbssid, common->curaid);
1545 }
1546
1547 if ((changed & BSS_CHANGED_BEACON_INT) ||
1548 (changed & BSS_CHANGED_BEACON) ||
1549 ((changed & BSS_CHANGED_BEACON_ENABLED) &&
1550 bss_conf->enable_beacon)) {
1551 priv->op_flags |= OP_ENABLE_BEACON;
1552 ath9k_htc_beacon_config(priv, vif);
1553 }
1554
1555 if (changed & BSS_CHANGED_BEACON)
1556 ath9k_htc_beacon_update(priv, vif);
1557
1558 if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
1559 !bss_conf->enable_beacon) {
1560 priv->op_flags &= ~OP_ENABLE_BEACON;
1561 ath9k_htc_beacon_config(priv, vif);
1562 }
1563
1564 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
1565 ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
1566 bss_conf->use_short_preamble);
1567 if (bss_conf->use_short_preamble)
1568 priv->op_flags |= OP_PREAMBLE_SHORT;
1569 else
1570 priv->op_flags &= ~OP_PREAMBLE_SHORT;
1571 }
1572
1573 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1574 ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
1575 bss_conf->use_cts_prot);
1576 if (bss_conf->use_cts_prot &&
1577 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
1578 priv->op_flags |= OP_PROTECT_ENABLE;
1579 else
1580 priv->op_flags &= ~OP_PROTECT_ENABLE;
1581 }
1582
1583 if (changed & BSS_CHANGED_ERP_SLOT) {
1584 if (bss_conf->use_short_slot)
1585 ah->slottime = 9;
1586 else
1587 ah->slottime = 20;
1588
1589 ath9k_hw_init_global_settings(ah);
1590 }
1591
1592 ath9k_htc_ps_restore(priv);
1593 mutex_unlock(&priv->mutex);
1594}
1595
1596static u64 ath9k_htc_get_tsf(struct ieee80211_hw *hw)
1597{
1598 struct ath9k_htc_priv *priv = hw->priv;
1599 u64 tsf;
1600
1601 mutex_lock(&priv->mutex);
1602 tsf = ath9k_hw_gettsf64(priv->ah);
1603 mutex_unlock(&priv->mutex);
1604
1605 return tsf;
1606}
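
ath9k_hw_gettsf64() is defined elsewhere in the driver; on hardware that exposes the 64-bit TSF as a pair of 32-bit registers, a torn read is possible if the lower half wraps between the two accesses. A minimal userspace sketch of the usual wrap-safe read technique, assuming that split-register layout; the fake_tsf counter and the two accessors are stand-ins, not the real ath9k register reads:

#include <stdio.h>
#include <stdint.h>

/* Fake TSF register pair; a real driver would read hardware here. */
static uint64_t fake_tsf = 0xffffffffULL;	/* lower half about to wrap */

static uint32_t read_tsf_upper(void) { fake_tsf += 3; return fake_tsf >> 32; }
static uint32_t read_tsf_lower(void) { fake_tsf += 3; return (uint32_t)fake_tsf; }

/*
 * Wrap-safe 64-bit read built from two 32-bit halves: if the upper
 * half changed while the lower half was being read, the lower half
 * wrapped in between and the pair must be re-sampled.
 */
static uint64_t read_tsf64(void)
{
	uint32_t hi, lo;

	do {
		hi = read_tsf_upper();
		lo = read_tsf_lower();
	} while (hi != read_tsf_upper());

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	printf("tsf = 0x%016llx\n", (unsigned long long)read_tsf64());
	return 0;
}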
1607
1608static void ath9k_htc_set_tsf(struct ieee80211_hw *hw, u64 tsf)
1609{
1610 struct ath9k_htc_priv *priv = hw->priv;
1611
1612 mutex_lock(&priv->mutex);
1613 ath9k_hw_settsf64(priv->ah, tsf);
1614 mutex_unlock(&priv->mutex);
1615}
1616
1617static void ath9k_htc_reset_tsf(struct ieee80211_hw *hw)
1618{
1619 struct ath9k_htc_priv *priv = hw->priv;
1620
1621 ath9k_htc_ps_wakeup(priv);
1622 mutex_lock(&priv->mutex);
1623 ath9k_hw_reset_tsf(priv->ah);
1624 mutex_unlock(&priv->mutex);
1625 ath9k_htc_ps_restore(priv);
1626}
1627
1628static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1629 struct ieee80211_vif *vif,
1630 enum ieee80211_ampdu_mlme_action action,
1631 struct ieee80211_sta *sta,
1632 u16 tid, u16 *ssn)
1633{
1634 struct ath9k_htc_priv *priv = hw->priv;
1635 struct ath9k_htc_aggr_work *work = &priv->aggr_work;
1636 struct ath9k_htc_sta *ista;
1637
1638 switch (action) {
1639 case IEEE80211_AMPDU_RX_START:
1640 break;
1641 case IEEE80211_AMPDU_RX_STOP:
1642 break;
1643 case IEEE80211_AMPDU_TX_START:
1644 case IEEE80211_AMPDU_TX_STOP:
1645 if (!(priv->op_flags & OP_TXAGGR))
1646 return -ENOTSUPP;
1647 memcpy(work->sta_addr, sta->addr, ETH_ALEN);
1648 work->hw = hw;
1649 work->vif = vif;
1650 work->action = action;
1651 work->tid = tid;
1652 ieee80211_queue_delayed_work(hw, &priv->ath9k_aggr_work, 0);
1653 break;
1654 case IEEE80211_AMPDU_TX_OPERATIONAL:
1655 ista = (struct ath9k_htc_sta *) sta->drv_priv;
1656 ista->tid_state[tid] = AGGR_OPERATIONAL;
1657 break;
1658 default:
1659 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
1660 "Unknown AMPDU action\n");
1661 }
1662
1663 return 0;
1664}
1665
1666static void ath9k_htc_sw_scan_start(struct ieee80211_hw *hw)
1667{
1668 struct ath9k_htc_priv *priv = hw->priv;
1669
1670 mutex_lock(&priv->mutex);
1671 spin_lock_bh(&priv->beacon_lock);
1672 priv->op_flags |= OP_SCANNING;
1673 spin_unlock_bh(&priv->beacon_lock);
1674 cancel_work_sync(&priv->ps_work);
1675 cancel_delayed_work_sync(&priv->ath9k_ani_work);
1676 mutex_unlock(&priv->mutex);
1677}
1678
1679static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1680{
1681 struct ath9k_htc_priv *priv = hw->priv;
1682
1683 ath9k_htc_ps_wakeup(priv);
1684 mutex_lock(&priv->mutex);
1685 spin_lock_bh(&priv->beacon_lock);
1686 priv->op_flags &= ~OP_SCANNING;
1687 spin_unlock_bh(&priv->beacon_lock);
1688 priv->op_flags |= OP_FULL_RESET;
1689 if (priv->op_flags & OP_ASSOCIATED)
1690 ath9k_htc_beacon_config(priv, NULL);
1691 ath_start_ani(priv);
1692 mutex_unlock(&priv->mutex);
1693 ath9k_htc_ps_restore(priv);
1694}
1695
1696static int ath9k_htc_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1697{
1698 return 0;
1699}
1700
1701static void ath9k_htc_set_coverage_class(struct ieee80211_hw *hw,
1702 u8 coverage_class)
1703{
1704 struct ath9k_htc_priv *priv = hw->priv;
1705
1706 mutex_lock(&priv->mutex);
1707 priv->ah->coverage_class = coverage_class;
1708 ath9k_hw_init_global_settings(priv->ah);
1709 mutex_unlock(&priv->mutex);
1710}
1711
1712struct ieee80211_ops ath9k_htc_ops = {
1713 .tx = ath9k_htc_tx,
1714 .start = ath9k_htc_start,
1715 .stop = ath9k_htc_stop,
1716 .add_interface = ath9k_htc_add_interface,
1717 .remove_interface = ath9k_htc_remove_interface,
1718 .config = ath9k_htc_config,
1719 .configure_filter = ath9k_htc_configure_filter,
1720 .sta_notify = ath9k_htc_sta_notify,
1721 .conf_tx = ath9k_htc_conf_tx,
1722 .bss_info_changed = ath9k_htc_bss_info_changed,
1723 .set_key = ath9k_htc_set_key,
1724 .get_tsf = ath9k_htc_get_tsf,
1725 .set_tsf = ath9k_htc_set_tsf,
1726 .reset_tsf = ath9k_htc_reset_tsf,
1727 .ampdu_action = ath9k_htc_ampdu_action,
1728 .sw_scan_start = ath9k_htc_sw_scan_start,
1729 .sw_scan_complete = ath9k_htc_sw_scan_complete,
1730 .set_rts_threshold = ath9k_htc_set_rts_threshold,
1731 .rfkill_poll = ath9k_htc_rfkill_poll_state,
1732 .set_coverage_class = ath9k_htc_set_coverage_class,
1733};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
new file mode 100644
index 000000000000..0a7cb30af5b4
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -0,0 +1,704 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19/******/
20/* TX */
21/******/
22
23int get_hw_qnum(u16 queue, int *hwq_map)
24{
25 switch (queue) {
26 case 0:
27 return hwq_map[ATH9K_WME_AC_VO];
28 case 1:
29 return hwq_map[ATH9K_WME_AC_VI];
30 case 2:
31 return hwq_map[ATH9K_WME_AC_BE];
32 case 3:
33 return hwq_map[ATH9K_WME_AC_BK];
34 default:
35 return hwq_map[ATH9K_WME_AC_BE];
36 }
37}
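
mac80211 numbers its TX queues in descending priority (0 = VO, 1 = VI, 2 = BE, 3 = BK), and get_hw_qnum() folds anything else back to best effort before indexing hwq_map. A self-contained sketch of the same table-based translation; the hardware queue numbers here are illustrative only, since the real ones come out of ath9k_hw_setuptxqueue() at init time:

#include <stdio.h>

enum { AC_BK, AC_BE, AC_VI, AC_VO, AC_MAX };

/* mac80211 queue index -> WMM access category, as in get_hw_qnum() */
static const int queue_to_ac[4] = { AC_VO, AC_VI, AC_BE, AC_BK };

int main(void)
{
	/* Made-up hardware queue numbers, for illustration only. */
	int hwq_map[AC_MAX] = {
		[AC_BK] = 1, [AC_BE] = 2, [AC_VI] = 3, [AC_VO] = 4
	};

	for (int queue = 0; queue < 4; queue++)
		printf("mac80211 queue %d -> hw queue %d\n",
		       queue, hwq_map[queue_to_ac[queue]]);
	return 0;
}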
38
39int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
40 struct ath9k_tx_queue_info *qinfo)
41{
42 struct ath_hw *ah = priv->ah;
43 int error = 0;
44 struct ath9k_tx_queue_info qi;
45
46 ath9k_hw_get_txq_props(ah, qnum, &qi);
47
48 qi.tqi_aifs = qinfo->tqi_aifs;
49 qi.tqi_cwmin = qinfo->tqi_cwmin / 2; /* XXX */
50 qi.tqi_cwmax = qinfo->tqi_cwmax;
51 qi.tqi_burstTime = qinfo->tqi_burstTime;
52 qi.tqi_readyTime = qinfo->tqi_readyTime;
53
54 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
55 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
56 "Unable to update hardware queue %u!\n", qnum);
57 error = -EIO;
58 } else {
59 ath9k_hw_resettxqueue(ah, qnum);
60 }
61
62 return error;
63}
64
65int ath9k_htc_tx_start(struct ath9k_htc_priv *priv, struct sk_buff *skb)
66{
67 struct ieee80211_hdr *hdr;
68 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
69 struct ieee80211_sta *sta = tx_info->control.sta;
70 struct ath9k_htc_sta *ista;
71 struct ath9k_htc_vif *avp;
72 struct ath9k_htc_tx_ctl tx_ctl;
73 enum htc_endpoint_id epid;
74 u16 qnum, hw_qnum;
75 __le16 fc;
76 u8 *tx_fhdr;
77 u8 sta_idx;
78
79 hdr = (struct ieee80211_hdr *) skb->data;
80 fc = hdr->frame_control;
81
82 avp = (struct ath9k_htc_vif *) tx_info->control.vif->drv_priv;
83 if (sta) {
84 ista = (struct ath9k_htc_sta *) sta->drv_priv;
85 sta_idx = ista->index;
86 } else {
87 sta_idx = 0;
88 }
89
90 memset(&tx_ctl, 0, sizeof(struct ath9k_htc_tx_ctl));
91
92 if (ieee80211_is_data(fc)) {
93 struct tx_frame_hdr tx_hdr;
94 u8 *qc;
95
96 memset(&tx_hdr, 0, sizeof(struct tx_frame_hdr));
97
98 tx_hdr.node_idx = sta_idx;
99 tx_hdr.vif_idx = avp->index;
100
101 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
102 tx_ctl.type = ATH9K_HTC_AMPDU;
103 tx_hdr.data_type = ATH9K_HTC_AMPDU;
104 } else {
105 tx_ctl.type = ATH9K_HTC_NORMAL;
106 tx_hdr.data_type = ATH9K_HTC_NORMAL;
107 }
108
109 if (ieee80211_is_data_qos(fc)) {
110 qc = ieee80211_get_qos_ctl(hdr);
111 tx_hdr.tidno = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
112 }
113
114 /* Check for RTS protection */
115 if (priv->hw->wiphy->rts_threshold != (u32) -1)
116 if (skb->len > priv->hw->wiphy->rts_threshold)
117 tx_hdr.flags |= ATH9K_HTC_TX_RTSCTS;
118
119 /* CTS-to-self */
120 if (!(tx_hdr.flags & ATH9K_HTC_TX_RTSCTS) &&
121 (priv->op_flags & OP_PROTECT_ENABLE))
122 tx_hdr.flags |= ATH9K_HTC_TX_CTSONLY;
123
124 tx_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
125 if (tx_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
126 tx_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
127 else
128 tx_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
129
130 tx_fhdr = skb_push(skb, sizeof(tx_hdr));
131 memcpy(tx_fhdr, (u8 *) &tx_hdr, sizeof(tx_hdr));
132
133 qnum = skb_get_queue_mapping(skb);
134 hw_qnum = get_hw_qnum(qnum, priv->hwq_map);
135
136 switch (hw_qnum) {
137 case 0:
138 epid = priv->data_be_ep;
139 break;
140 case 2:
141 epid = priv->data_vi_ep;
142 break;
143 case 3:
144 epid = priv->data_vo_ep;
145 break;
146 case 1:
147 default:
148 epid = priv->data_bk_ep;
149 break;
150 }
151 } else {
152 struct tx_mgmt_hdr mgmt_hdr;
153
154 memset(&mgmt_hdr, 0, sizeof(struct tx_mgmt_hdr));
155
156 tx_ctl.type = ATH9K_HTC_NORMAL;
157
158 mgmt_hdr.node_idx = sta_idx;
159 mgmt_hdr.vif_idx = avp->index;
160 mgmt_hdr.tidno = 0;
161 mgmt_hdr.flags = 0;
162
163 mgmt_hdr.key_type = ath9k_cmn_get_hw_crypto_keytype(skb);
164 if (mgmt_hdr.key_type == ATH9K_KEY_TYPE_CLEAR)
165 mgmt_hdr.keyix = (u8) ATH9K_TXKEYIX_INVALID;
166 else
167 mgmt_hdr.keyix = tx_info->control.hw_key->hw_key_idx;
168
169 tx_fhdr = skb_push(skb, sizeof(mgmt_hdr));
170 memcpy(tx_fhdr, (u8 *) &mgmt_hdr, sizeof(mgmt_hdr));
171 epid = priv->mgmt_ep;
172 }
173
174 return htc_send(priv->htc, skb, epid, &tx_ctl);
175}
176
177void ath9k_tx_tasklet(unsigned long data)
178{
179 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
180 struct ieee80211_sta *sta;
181 struct ieee80211_hdr *hdr;
182 struct ieee80211_tx_info *tx_info;
183 struct sk_buff *skb = NULL;
184 __le16 fc;
185
186 while ((skb = skb_dequeue(&priv->tx_queue)) != NULL) {
187
188 hdr = (struct ieee80211_hdr *) skb->data;
189 fc = hdr->frame_control;
190 tx_info = IEEE80211_SKB_CB(skb);
191
192 memset(&tx_info->status, 0, sizeof(tx_info->status));
193
194 rcu_read_lock();
195
196 sta = ieee80211_find_sta(priv->vif, hdr->addr1);
197 if (!sta) {
198 rcu_read_unlock();
199 ieee80211_tx_status(priv->hw, skb);
200 continue;
201 }
202
203 /* Check if we need to start aggregation */
204
205 if (sta && conf_is_ht(&priv->hw->conf) &&
206 (priv->op_flags & OP_TXAGGR)
207 && !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
208 if (ieee80211_is_data_qos(fc)) {
209 u8 *qc, tid;
210 struct ath9k_htc_sta *ista;
211
212 qc = ieee80211_get_qos_ctl(hdr);
213 tid = qc[0] & 0xf;
214 ista = (struct ath9k_htc_sta *)sta->drv_priv;
215
216 if ((tid < ATH9K_HTC_MAX_TID) &&
217 ista->tid_state[tid] == AGGR_STOP) {
218 ieee80211_start_tx_ba_session(sta, tid);
219 ista->tid_state[tid] = AGGR_PROGRESS;
220 }
221 }
222 }
223
224 rcu_read_unlock();
225
226 /* Send status to mac80211 */
227 ieee80211_tx_status(priv->hw, skb);
228 }
229
230 /* Wake TX queues if needed */
231 spin_lock_bh(&priv->tx_lock);
232 if (priv->tx_queues_stop) {
233 priv->tx_queues_stop = false;
234 spin_unlock_bh(&priv->tx_lock);
235 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
236 "Waking up TX queues\n");
237 ieee80211_wake_queues(priv->hw);
238 return;
239 }
240 spin_unlock_bh(&priv->tx_lock);
241}
242
243void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
244 enum htc_endpoint_id ep_id, bool txok)
245{
246 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) drv_priv;
247 struct ieee80211_tx_info *tx_info;
248
249 if (!skb)
250 return;
251
252 if (ep_id == priv->mgmt_ep)
253 skb_pull(skb, sizeof(struct tx_mgmt_hdr));
254 else
255 /* TODO: Check for cab/uapsd/data */
256 skb_pull(skb, sizeof(struct tx_frame_hdr));
257
258 tx_info = IEEE80211_SKB_CB(skb);
259
260 if (txok)
261 tx_info->flags |= IEEE80211_TX_STAT_ACK;
262
263 skb_queue_tail(&priv->tx_queue, skb);
264 tasklet_schedule(&priv->tx_tasklet);
265}
266
267int ath9k_tx_init(struct ath9k_htc_priv *priv)
268{
269 skb_queue_head_init(&priv->tx_queue);
270 return 0;
271}
272
273void ath9k_tx_cleanup(struct ath9k_htc_priv *priv)
274{
275
276}
277
278bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv,
279 enum ath9k_tx_queue_subtype subtype)
280{
281 struct ath_hw *ah = priv->ah;
282 struct ath_common *common = ath9k_hw_common(ah);
283 struct ath9k_tx_queue_info qi;
284 int qnum;
285
286 memset(&qi, 0, sizeof(qi));
287
288 qi.tqi_subtype = subtype;
289 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
290 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
291 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
292 qi.tqi_physCompBuf = 0;
293 qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE | TXQ_FLAG_TXDESCINT_ENABLE;
294
295 qnum = ath9k_hw_setuptxqueue(priv->ah, ATH9K_TX_QUEUE_DATA, &qi);
296 if (qnum == -1)
297 return false;
298
299 if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
300 ath_print(common, ATH_DBG_FATAL,
301 "qnum %u out of range, max %u!\n",
302 qnum, (unsigned int)ARRAY_SIZE(priv->hwq_map));
303 ath9k_hw_releasetxqueue(ah, qnum);
304 return false;
305 }
306
307 priv->hwq_map[subtype] = qnum;
308 return true;
309}
310
311/******/
312/* RX */
313/******/
314
315/*
316 * Calculate the RX filter to be set in the HW.
317 */
318u32 ath9k_htc_calcrxfilter(struct ath9k_htc_priv *priv)
319{
320#define RX_FILTER_PRESERVE (ATH9K_RX_FILTER_PHYERR | ATH9K_RX_FILTER_PHYRADAR)
321
322 struct ath_hw *ah = priv->ah;
323 u32 rfilt;
324
325 rfilt = (ath9k_hw_getrxfilter(ah) & RX_FILTER_PRESERVE)
326 | ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
327 | ATH9K_RX_FILTER_MCAST;
328
329 /* If not a STA, enable processing of Probe Requests */
330 if (ah->opmode != NL80211_IFTYPE_STATION)
331 rfilt |= ATH9K_RX_FILTER_PROBEREQ;
332
333 /*
334 * Set promiscuous mode when FIF_PROMISC_IN_BSS is enabled for station
335 * mode interface or when in monitor mode. AP mode does not need this
336 * since it receives all in-BSS frames anyway.
337 */
338 if (((ah->opmode != NL80211_IFTYPE_AP) &&
339 (priv->rxfilter & FIF_PROMISC_IN_BSS)) ||
340 (ah->opmode == NL80211_IFTYPE_MONITOR))
341 rfilt |= ATH9K_RX_FILTER_PROM;
342
343 if (priv->rxfilter & FIF_CONTROL)
344 rfilt |= ATH9K_RX_FILTER_CONTROL;
345
346 if ((ah->opmode == NL80211_IFTYPE_STATION) &&
347 !(priv->rxfilter & FIF_BCN_PRBRESP_PROMISC))
348 rfilt |= ATH9K_RX_FILTER_MYBEACON;
349 else
350 rfilt |= ATH9K_RX_FILTER_BEACON;
351
352 if (conf_is_ht(&priv->hw->conf))
353 rfilt |= ATH9K_RX_FILTER_COMP_BAR;
354
355 return rfilt;
356
357#undef RX_FILTER_PRESERVE
358}
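
The function above follows a read-modify-write pattern: keep only the hardware-managed PHY-error bits from the current filter, rebuild the mandatory base, then OR in the mode-dependent bits. Done in the other order, the preserve mask would wipe out the rebuilt bits. A compact sketch of that ordering, with made-up bit values (the real ATH9K_RX_FILTER_* constants differ):

#include <stdint.h>

/* Illustrative filter bits only; not the real ATH9K_RX_FILTER_* values. */
#define F_UCAST   0x01
#define F_MCAST   0x02
#define F_BCAST   0x04
#define F_PROM    0x08
#define F_PHYERR  0x10	/* hardware-managed: carried over from the old value */

static uint32_t rebuild_filter(uint32_t current, int promisc)
{
	/* Preserve first... */
	uint32_t rfilt = current & F_PHYERR;

	/* ...then rebuild everything else from scratch. */
	rfilt |= F_UCAST | F_MCAST | F_BCAST;
	if (promisc)
		rfilt |= F_PROM;

	return rfilt;
}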
359
360/*
361 * Recv initialization for opmode change.
362 */
363static void ath9k_htc_opmode_init(struct ath9k_htc_priv *priv)
364{
365 struct ath_hw *ah = priv->ah;
366 struct ath_common *common = ath9k_hw_common(ah);
367
368 u32 rfilt, mfilt[2];
369
370 /* configure rx filter */
371 rfilt = ath9k_htc_calcrxfilter(priv);
372 ath9k_hw_setrxfilter(ah, rfilt);
373
374 /* configure bssid mask */
375 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
376 ath_hw_setbssidmask(common);
377
378 /* configure operational mode */
379 ath9k_hw_setopmode(ah);
380
381 /* Handle any link-level address change. */
382 ath9k_hw_setmac(ah, common->macaddr);
383
384 /* calculate and install multicast filter */
385 mfilt[0] = mfilt[1] = ~0;
386 ath9k_hw_setmcastfilter(ah, mfilt[0], mfilt[1]);
387}
388
389void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
390{
391 ath9k_hw_rxena(priv->ah);
392 ath9k_htc_opmode_init(priv);
393 ath9k_hw_startpcureceive(priv->ah);
394 priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
395}
396
397static void ath9k_process_rate(struct ieee80211_hw *hw,
398 struct ieee80211_rx_status *rxs,
399 u8 rx_rate, u8 rs_flags)
400{
401 struct ieee80211_supported_band *sband;
402 enum ieee80211_band band;
403 unsigned int i = 0;
404
405 if (rx_rate & 0x80) {
406 /* HT rate */
407 rxs->flag |= RX_FLAG_HT;
408 if (rs_flags & ATH9K_RX_2040)
409 rxs->flag |= RX_FLAG_40MHZ;
410 if (rs_flags & ATH9K_RX_GI)
411 rxs->flag |= RX_FLAG_SHORT_GI;
412 rxs->rate_idx = rx_rate & 0x7f;
413 return;
414 }
415
416 band = hw->conf.channel->band;
417 sband = hw->wiphy->bands[band];
418
419 for (i = 0; i < sband->n_bitrates; i++) {
420 if (sband->bitrates[i].hw_value == rx_rate) {
421 rxs->rate_idx = i;
422 return;
423 }
424 if (sband->bitrates[i].hw_value_short == rx_rate) {
425 rxs->rate_idx = i;
426 rxs->flag |= RX_FLAG_SHORTPRE;
427 return;
428 }
429 }
430
431}
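
ath9k_process_rate() relies on the convention that bit 7 of the hardware rate byte marks an HT/MCS rate, with the MCS index in the low seven bits; anything else is a legacy rate code that has to be matched against the band's bitrate table (hw_value for long preamble, hw_value_short for short). A standalone sketch of just that HT/legacy split, assuming the same 0x80 encoding:

#include <stdio.h>
#include <stdint.h>

static void decode_rate(uint8_t rx_rate)
{
	if (rx_rate & 0x80) {
		/* HT rate: the low 7 bits carry the MCS index. */
		printf("HT MCS %u\n", rx_rate & 0x7f);
	} else {
		/* Legacy rate: a hardware rate code to be looked up
		 * in the current band's bitrate table. */
		printf("legacy hw rate code 0x%02x\n", rx_rate);
	}
}

int main(void)
{
	decode_rate(0x87);	/* HT MCS 7 */
	decode_rate(0x0b);	/* some legacy rate code */
	return 0;
}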
432
433static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
434 struct ath9k_htc_rxbuf *rxbuf,
435 struct ieee80211_rx_status *rx_status)
436
437{
438 struct ieee80211_hdr *hdr;
439 struct ieee80211_hw *hw = priv->hw;
440 struct sk_buff *skb = rxbuf->skb;
441 struct ath_common *common = ath9k_hw_common(priv->ah);
442 int hdrlen, padpos, padsize;
443 int last_rssi = ATH_RSSI_DUMMY_MARKER;
444 __le16 fc;
445
446 hdr = (struct ieee80211_hdr *)skb->data;
447 fc = hdr->frame_control;
448 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
449
450 padpos = ath9k_cmn_padpos(fc);
451
452 padsize = padpos & 3;
453 if (padsize && skb->len >= padpos+padsize+FCS_LEN) {
454 memmove(skb->data + padsize, skb->data, padpos);
455 skb_pull(skb, padsize);
456 }
457
458 memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
459
460 if (rxbuf->rxstatus.rs_status != 0) {
461 if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
462 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
463 if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
464 goto rx_next;
465
466 if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
467 /* FIXME */
468 } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
469 if (ieee80211_is_ctl(fc))
470 /*
471 * Sometimes, we get invalid
472 * MIC failures on valid control frames.
473 * Remove these mic errors.
474 */
475 rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
476 else
477 rx_status->flag |= RX_FLAG_MMIC_ERROR;
478 }
479
480 /*
481 * Reject error frames with the exception of
482 * decryption and MIC failures. For monitor mode,
483 * we also ignore the CRC error.
484 */
485 if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
486 if (rxbuf->rxstatus.rs_status &
487 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
488 ATH9K_RXERR_CRC))
489 goto rx_next;
490 } else {
491 if (rxbuf->rxstatus.rs_status &
492 ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
493 goto rx_next;
494 }
495 }
496 }
497
498 if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
499 u8 keyix;
500 keyix = rxbuf->rxstatus.rs_keyix;
501 if (keyix != ATH9K_RXKEYIX_INVALID) {
502 rx_status->flag |= RX_FLAG_DECRYPTED;
503 } else if (ieee80211_has_protected(fc) &&
504 skb->len >= hdrlen + 4) {
505 keyix = skb->data[hdrlen + 3] >> 6;
506 if (test_bit(keyix, common->keymap))
507 rx_status->flag |= RX_FLAG_DECRYPTED;
508 }
509 }
510
511 ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
512 rxbuf->rxstatus.rs_flags);
513
514 if (priv->op_flags & OP_ASSOCIATED) {
515 if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
516 !rxbuf->rxstatus.rs_moreaggr)
517 ATH_RSSI_LPF(priv->rx.last_rssi,
518 rxbuf->rxstatus.rs_rssi);
519
520 last_rssi = priv->rx.last_rssi;
521
522 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
523 rxbuf->rxstatus.rs_rssi = ATH_EP_RND(last_rssi,
524 ATH_RSSI_EP_MULTIPLIER);
525
526 if (rxbuf->rxstatus.rs_rssi < 0)
527 rxbuf->rxstatus.rs_rssi = 0;
528
529 if (ieee80211_is_beacon(fc))
530 priv->ah->stats.avgbrssi = rxbuf->rxstatus.rs_rssi;
531 }
532
533 rx_status->mactime = rxbuf->rxstatus.rs_tstamp;
534 rx_status->band = hw->conf.channel->band;
535 rx_status->freq = hw->conf.channel->center_freq;
536 rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
537 rx_status->antenna = rxbuf->rxstatus.rs_antenna;
538 rx_status->flag |= RX_FLAG_TSFT;
539
540 return true;
541
542rx_next:
543 return false;
544}
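
The memmove/skb_pull pair at the top of ath9k_rx_prepare() undoes the alignment padding the hardware inserts after the 802.11 header: padpos & 3 gives the pad size, and sliding the header up by that amount leaves header and body contiguous. A standalone sketch with a 26-byte QoS data header (24 + 2 QoS bytes), so the pad works out to 2 bytes:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
	/* [hdr:26][pad:2][body]  ->  [hdr:26][body] */
	uint8_t buf[64];
	size_t padpos = 26;		/* end of the 802.11 header */
	size_t padsize = padpos & 3;	/* 2 */

	memset(buf, 'H', padpos);			/* header bytes */
	memset(buf + padpos, 'P', padsize);		/* hardware pad */
	memcpy(buf + padpos + padsize, "body", 4);	/* payload */

	/* Slide the header over the pad... */
	memmove(buf + padsize, buf, padpos);
	/* ...and advance the start, the skb_pull() equivalent. */
	uint8_t *frame = buf + padsize;

	printf("%.30s\n", frame);	/* 26 x 'H' followed by "body" */
	return 0;
}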
545
546/*
547 * FIXME: Handle FLUSH later on.
548 */
549void ath9k_rx_tasklet(unsigned long data)
550{
551 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
552 struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
553 struct ieee80211_rx_status rx_status;
554 struct sk_buff *skb;
555 unsigned long flags;
556 struct ieee80211_hdr *hdr;
557
558 do {
559 spin_lock_irqsave(&priv->rx.rxbuflock, flags);
560 list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
561 if (tmp_buf->in_process) {
562 rxbuf = tmp_buf;
563 break;
564 }
565 }
566
567 if (rxbuf == NULL) {
568 spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
569 break;
570 }
571
572 if (!rxbuf->skb)
573 goto requeue;
574
575 if (!ath9k_rx_prepare(priv, rxbuf, &rx_status)) {
576 dev_kfree_skb_any(rxbuf->skb);
577 goto requeue;
578 }
579
580 memcpy(IEEE80211_SKB_RXCB(rxbuf->skb), &rx_status,
581 sizeof(struct ieee80211_rx_status));
582 skb = rxbuf->skb;
583 hdr = (struct ieee80211_hdr *) skb->data;
584
585 if (ieee80211_is_beacon(hdr->frame_control) && priv->ps_enabled)
586 ieee80211_queue_work(priv->hw, &priv->ps_work);
587
588 spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
589
590 ieee80211_rx(priv->hw, skb);
591
592 spin_lock_irqsave(&priv->rx.rxbuflock, flags);
593requeue:
594 rxbuf->in_process = false;
595 rxbuf->skb = NULL;
596 list_move_tail(&rxbuf->list, &priv->rx.rxbuf);
597 rxbuf = NULL;
598 spin_unlock_irqrestore(&priv->rx.rxbuflock, flags);
599 } while (1);
600
601}
602
603void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
604 enum htc_endpoint_id ep_id)
605{
606 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)drv_priv;
607 struct ath_hw *ah = priv->ah;
608 struct ath_common *common = ath9k_hw_common(ah);
609 struct ath9k_htc_rxbuf *rxbuf = NULL, *tmp_buf = NULL;
610 struct ath_htc_rx_status *rxstatus;
611 u32 len = 0;
612
613 spin_lock(&priv->rx.rxbuflock);
614 list_for_each_entry(tmp_buf, &priv->rx.rxbuf, list) {
615 if (!tmp_buf->in_process) {
616 rxbuf = tmp_buf;
617 break;
618 }
619 }
620 spin_unlock(&priv->rx.rxbuflock);
621
622 if (rxbuf == NULL) {
623 ath_print(common, ATH_DBG_ANY,
624 "No free RX buffer\n");
625 goto err;
626 }
627
628 len = skb->len;
629 if (len <= HTC_RX_FRAME_HEADER_SIZE) {
630 ath_print(common, ATH_DBG_FATAL,
631 "Corrupted RX frame, dropping\n");
632 goto err;
633 }
634
635 rxstatus = (struct ath_htc_rx_status *)skb->data;
636
637 rxstatus->rs_tstamp = be64_to_cpu(rxstatus->rs_tstamp);
638 rxstatus->rs_datalen = be16_to_cpu(rxstatus->rs_datalen);
639 rxstatus->evm0 = be32_to_cpu(rxstatus->evm0);
640 rxstatus->evm1 = be32_to_cpu(rxstatus->evm1);
641 rxstatus->evm2 = be32_to_cpu(rxstatus->evm2);
642
643 if (rxstatus->rs_datalen - (len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
644 ath_print(common, ATH_DBG_FATAL,
645 "Corrupted RX data len, dropping "
646 "(epid: %d, dlen: %d, skblen: %d)\n",
647 ep_id, rxstatus->rs_datalen, len);
648 goto err;
649 }
650
651 spin_lock(&priv->rx.rxbuflock);
652 memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
653 skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
654 rxbuf->skb = skb;
655 rxbuf->in_process = true;
656 spin_unlock(&priv->rx.rxbuflock);
657
658 tasklet_schedule(&priv->rx_tasklet);
659 return;
660err:
661 dev_kfree_skb_any(skb);
662 return;
663}
664
665/* FIXME: Locking for cleanup/init */
666
667void ath9k_rx_cleanup(struct ath9k_htc_priv *priv)
668{
669 struct ath9k_htc_rxbuf *rxbuf, *tbuf;
670
671 list_for_each_entry_safe(rxbuf, tbuf, &priv->rx.rxbuf, list) {
672 list_del(&rxbuf->list);
673 if (rxbuf->skb)
674 dev_kfree_skb_any(rxbuf->skb);
675 kfree(rxbuf);
676 }
677}
678
679int ath9k_rx_init(struct ath9k_htc_priv *priv)
680{
681 struct ath_hw *ah = priv->ah;
682 struct ath_common *common = ath9k_hw_common(ah);
683 struct ath9k_htc_rxbuf *rxbuf;
684 int i = 0;
685
686 INIT_LIST_HEAD(&priv->rx.rxbuf);
687 spin_lock_init(&priv->rx.rxbuflock);
688
689 for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
690 rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
691 if (rxbuf == NULL) {
692 ath_print(common, ATH_DBG_FATAL,
693 "Unable to allocate RX buffers\n");
694 goto err;
695 }
696 list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
697 }
698
699 return 0;
700
701err:
702 ath9k_rx_cleanup(priv);
703 return -ENOMEM;
704}
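
Taken together, ath9k_htc_rxep(), ath9k_rx_tasklet() and the init/cleanup pair above implement a fixed pool of RX descriptors on a single list, with the in_process flag marking whether the USB completion path or the tasklet currently owns each entry. A minimal single-threaded sketch of that claim/drain cycle; the real code guards the list with rxbuflock, which is omitted here for brevity:

#include <stdio.h>
#include <stdbool.h>

#define NBUF 4

struct rxbuf {
	bool in_process;
	int frame;	/* stand-in for the skb */
};

static struct rxbuf pool[NBUF];

/* Completion side: claim a free descriptor and fill it. */
static struct rxbuf *rx_claim(int frame)
{
	for (int i = 0; i < NBUF; i++) {
		if (!pool[i].in_process) {
			pool[i].in_process = true;
			pool[i].frame = frame;
			return &pool[i];
		}
	}
	return NULL;	/* "No free RX buffer" */
}

/* Tasklet side: consume filled descriptors and recycle them. */
static void rx_drain(void)
{
	for (int i = 0; i < NBUF; i++) {
		if (pool[i].in_process) {
			printf("processed frame %d\n", pool[i].frame);
			pool[i].in_process = false;
		}
	}
}

int main(void)
{
	rx_claim(1);
	rx_claim(2);
	rx_drain();
	return 0;
}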
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
new file mode 100644
index 000000000000..587d98ed0989
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -0,0 +1,476 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19static int htc_issue_send(struct htc_target *target, struct sk_buff* skb,
20 u16 len, u8 flags, u8 epid,
21 struct ath9k_htc_tx_ctl *tx_ctl)
22{
23 struct htc_frame_hdr *hdr;
24 struct htc_endpoint *endpoint = &target->endpoint[epid];
25 int status;
26
27 hdr = (struct htc_frame_hdr *)
28 skb_push(skb, sizeof(struct htc_frame_hdr));
29 hdr->endpoint_id = epid;
30 hdr->flags = flags;
31 hdr->payload_len = cpu_to_be16(len);
32
33 status = target->hif->send(target->hif_dev, endpoint->ul_pipeid, skb,
34 tx_ctl);
35 return status;
36}
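
htc_issue_send() reserves headroom with skb_push() and writes the HTC frame header in place, with payload_len stored big-endian (cpu_to_be16). A userspace sketch of the same 8-byte serialization, writing the length byte by byte so it is endian-independent; zeroing control[] is an addition here for deterministic output, the function above leaves those bytes untouched:

#include <stdint.h>
#include <string.h>

#define HTC_HDR_LEN 8	/* mirrors sizeof(struct htc_frame_hdr) */

static void pack_htc_hdr(uint8_t hdr[HTC_HDR_LEN], uint8_t epid,
			 uint8_t flags, uint16_t payload_len)
{
	hdr[0] = epid;
	hdr[1] = flags;
	hdr[2] = payload_len >> 8;	/* big-endian: high byte first */
	hdr[3] = payload_len & 0xff;
	memset(&hdr[4], 0, 4);		/* control[4]; zeroed only in
					 * this sketch */
}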
37
38static struct htc_endpoint *get_next_avail_ep(struct htc_endpoint *endpoint)
39{
40 enum htc_endpoint_id avail_epid;
41
42 for (avail_epid = ENDPOINT_MAX; avail_epid > ENDPOINT0; avail_epid--)
43 if (endpoint[avail_epid].service_id == 0)
44 return &endpoint[avail_epid];
45 return NULL;
46}
47
48static u8 service_to_ulpipe(u16 service_id)
49{
50 switch (service_id) {
51 case WMI_CONTROL_SVC:
52 return 4;
53 case WMI_BEACON_SVC:
54 case WMI_CAB_SVC:
55 case WMI_UAPSD_SVC:
56 case WMI_MGMT_SVC:
57 case WMI_DATA_VO_SVC:
58 case WMI_DATA_VI_SVC:
59 case WMI_DATA_BE_SVC:
60 case WMI_DATA_BK_SVC:
61 return 1;
62 default:
63 return 0;
64 }
65}
66
67static u8 service_to_dlpipe(u16 service_id)
68{
69 switch (service_id) {
70 case WMI_CONTROL_SVC:
71 return 3;
72 case WMI_BEACON_SVC:
73 case WMI_CAB_SVC:
74 case WMI_UAPSD_SVC:
75 case WMI_MGMT_SVC:
76 case WMI_DATA_VO_SVC:
77 case WMI_DATA_VI_SVC:
78 case WMI_DATA_BE_SVC:
79 case WMI_DATA_BK_SVC:
80 return 2;
81 default:
82 return 0;
83 }
84}
85
86static void htc_process_target_rdy(struct htc_target *target,
87 void *buf)
88{
89 struct htc_endpoint *endpoint;
90 struct htc_ready_msg *htc_ready_msg = (struct htc_ready_msg *) buf;
91
92 target->credits = be16_to_cpu(htc_ready_msg->credits);
93 target->credit_size = be16_to_cpu(htc_ready_msg->credit_size);
94
95 endpoint = &target->endpoint[ENDPOINT0];
96 endpoint->service_id = HTC_CTRL_RSVD_SVC;
97 endpoint->max_msglen = HTC_MAX_CONTROL_MESSAGE_LENGTH;
98 complete(&target->target_wait);
99}
100
101static void htc_process_conn_rsp(struct htc_target *target,
102 struct htc_frame_hdr *htc_hdr)
103{
104 struct htc_conn_svc_rspmsg *svc_rspmsg;
105 struct htc_endpoint *endpoint, *tmp_endpoint = NULL;
106 u16 service_id;
107 u16 max_msglen;
108 enum htc_endpoint_id epid, tepid;
109
110 svc_rspmsg = (struct htc_conn_svc_rspmsg *)
111 ((void *) htc_hdr + sizeof(struct htc_frame_hdr));
112
113 if (svc_rspmsg->status == HTC_SERVICE_SUCCESS) {
114 epid = svc_rspmsg->endpoint_id;
115 service_id = be16_to_cpu(svc_rspmsg->service_id);
116 max_msglen = be16_to_cpu(svc_rspmsg->max_msg_len);
117 endpoint = &target->endpoint[epid];
118
119 for (tepid = ENDPOINT_MAX; tepid > ENDPOINT0; tepid--) {
120 tmp_endpoint = &target->endpoint[tepid];
121 if (tmp_endpoint->service_id == service_id) {
122 tmp_endpoint->service_id = 0;
123 break;
124 }
125 }
126
127 if (!tmp_endpoint)
128 return;
129
130 endpoint->service_id = service_id;
131 endpoint->max_txqdepth = tmp_endpoint->max_txqdepth;
132 endpoint->ep_callbacks = tmp_endpoint->ep_callbacks;
133 endpoint->ul_pipeid = tmp_endpoint->ul_pipeid;
134 endpoint->dl_pipeid = tmp_endpoint->dl_pipeid;
135 endpoint->max_msglen = max_msglen;
136 target->conn_rsp_epid = epid;
137 complete(&target->cmd_wait);
138 } else {
139 target->conn_rsp_epid = ENDPOINT_UNUSED;
140 }
141}
142
143static int htc_config_pipe_credits(struct htc_target *target)
144{
145 struct sk_buff *skb;
146 struct htc_config_pipe_msg *cp_msg;
147 int ret, time_left;
148
149 skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
150 if (!skb) {
151 dev_err(target->dev, "failed to allocate send buffer\n");
152 return -ENOMEM;
153 }
154 skb_reserve(skb, sizeof(struct htc_frame_hdr));
155
156 cp_msg = (struct htc_config_pipe_msg *)
157 skb_put(skb, sizeof(struct htc_config_pipe_msg));
158
159 cp_msg->message_id = cpu_to_be16(HTC_MSG_CONFIG_PIPE_ID);
160 cp_msg->pipe_id = USB_WLAN_TX_PIPE;
161 cp_msg->credits = 28;
162
163 target->htc_flags |= HTC_OP_CONFIG_PIPE_CREDITS;
164
165 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
166 if (ret)
167 goto err;
168
169 time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
170 if (!time_left) {
171 dev_err(target->dev, "HTC credit config timeout\n");
172 return -ETIMEDOUT;
173 }
174
175 return 0;
176err:
177 kfree_skb(skb);
178 return -EINVAL;
179}
180
181static int htc_setup_complete(struct htc_target *target)
182{
183 struct sk_buff *skb;
184 struct htc_comp_msg *comp_msg;
185 int ret = 0, time_left;
186
187 skb = alloc_skb(50 + sizeof(struct htc_frame_hdr), GFP_ATOMIC);
188 if (!skb) {
189 dev_err(target->dev, "failed to allocate send buffer\n");
190 return -ENOMEM;
191 }
192 skb_reserve(skb, sizeof(struct htc_frame_hdr));
193
194 comp_msg = (struct htc_comp_msg *)
195 skb_put(skb, sizeof(struct htc_comp_msg));
196 comp_msg->msg_id = cpu_to_be16(HTC_MSG_SETUP_COMPLETE_ID);
197
198 target->htc_flags |= HTC_OP_START_WAIT;
199
200 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
201 if (ret)
202 goto err;
203
204 time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
205 if (!time_left) {
206 dev_err(target->dev, "HTC start timeout\n");
207 return -ETIMEDOUT;
208 }
209
210 return 0;
211
212err:
213 kfree_skb(skb);
214 return -EINVAL;
215}
216
217/* HTC APIs */
218
219int htc_init(struct htc_target *target)
220{
221 int ret;
222
223 ret = htc_config_pipe_credits(target);
224 if (ret)
225 return ret;
226
227 return htc_setup_complete(target);
228}
229
230int htc_connect_service(struct htc_target *target,
231 struct htc_service_connreq *service_connreq,
232 enum htc_endpoint_id *conn_rsp_epid)
233{
234 struct sk_buff *skb;
235 struct htc_endpoint *endpoint;
236 struct htc_conn_svc_msg *conn_msg;
237 int ret, time_left;
238
239 /* Find an available endpoint */
240 endpoint = get_next_avail_ep(target->endpoint);
241 if (!endpoint) {
242 dev_err(target->dev, "Endpoint is not available for "
243 "service %d\n", service_connreq->service_id);
244 return -EINVAL;
245 }
246
247 endpoint->service_id = service_connreq->service_id;
248 endpoint->max_txqdepth = service_connreq->max_send_qdepth;
249 endpoint->ul_pipeid = service_to_ulpipe(service_connreq->service_id);
250 endpoint->dl_pipeid = service_to_dlpipe(service_connreq->service_id);
251 endpoint->ep_callbacks = service_connreq->ep_callbacks;
252
253 skb = alloc_skb(sizeof(struct htc_conn_svc_msg) +
254 sizeof(struct htc_frame_hdr), GFP_ATOMIC);
255 if (!skb) {
256 dev_err(target->dev, "Failed to allocate buf to send "
257 "service connect req\n");
258 return -ENOMEM;
259 }
260
261 skb_reserve(skb, sizeof(struct htc_frame_hdr));
262
263 conn_msg = (struct htc_conn_svc_msg *)
264 skb_put(skb, sizeof(struct htc_conn_svc_msg));
265 conn_msg->service_id = cpu_to_be16(service_connreq->service_id);
266 conn_msg->msg_id = cpu_to_be16(HTC_MSG_CONNECT_SERVICE_ID);
267 conn_msg->con_flags = cpu_to_be16(service_connreq->con_flags);
268 conn_msg->dl_pipeid = endpoint->dl_pipeid;
269 conn_msg->ul_pipeid = endpoint->ul_pipeid;
270
271 ret = htc_issue_send(target, skb, skb->len, 0, ENDPOINT0, NULL);
272 if (ret)
273 goto err;
274
275 time_left = wait_for_completion_timeout(&target->cmd_wait, HZ);
276 if (!time_left) {
277 dev_err(target->dev, "Service connection timeout for: %d\n",
278 service_connreq->service_id);
279 return -ETIMEDOUT;
280 }
281
282 *conn_rsp_epid = target->conn_rsp_epid;
283 return 0;
284err:
285 kfree_skb(skb);
286 return ret;
287}
288
289int htc_send(struct htc_target *target, struct sk_buff *skb,
290 enum htc_endpoint_id epid, struct ath9k_htc_tx_ctl *tx_ctl)
291{
292 return htc_issue_send(target, skb, skb->len, 0, epid, tx_ctl);
293}
294
295void htc_stop(struct htc_target *target)
296{
297 enum htc_endpoint_id epid;
298 struct htc_endpoint *endpoint;
299
300 for (epid = ENDPOINT0; epid <= ENDPOINT_MAX; epid++) {
301 endpoint = &target->endpoint[epid];
302 if (endpoint->service_id != 0)
303 target->hif->stop(target->hif_dev, endpoint->ul_pipeid);
304 }
305}
306
307void htc_start(struct htc_target *target)
308{
309 enum htc_endpoint_id epid;
310 struct htc_endpoint *endpoint;
311
312 for (epid = ENDPOINT0; epid <= ENDPOINT_MAX; epid++) {
313 endpoint = &target->endpoint[epid];
314 if (endpoint->service_id != 0)
315 target->hif->start(target->hif_dev,
316 endpoint->ul_pipeid);
317 }
318}
319
320void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
321 struct sk_buff *skb, bool txok)
322{
323 struct htc_endpoint *endpoint;
324 struct htc_frame_hdr *htc_hdr = NULL;
325
326 if (htc_handle->htc_flags & HTC_OP_CONFIG_PIPE_CREDITS) {
327 complete(&htc_handle->cmd_wait);
328 htc_handle->htc_flags &= ~HTC_OP_CONFIG_PIPE_CREDITS;
329 goto ret;
330 }
331
332 if (htc_handle->htc_flags & HTC_OP_START_WAIT) {
333 complete(&htc_handle->cmd_wait);
334 htc_handle->htc_flags &= ~HTC_OP_START_WAIT;
335 goto ret;
336 }
337
338 if (skb) {
339 htc_hdr = (struct htc_frame_hdr *) skb->data;
340 endpoint = &htc_handle->endpoint[htc_hdr->endpoint_id];
341 skb_pull(skb, sizeof(struct htc_frame_hdr));
342
343 if (endpoint->ep_callbacks.tx) {
344 endpoint->ep_callbacks.tx(htc_handle->drv_priv, skb,
345 htc_hdr->endpoint_id, txok);
346 }
347 }
348
349 return;
350ret:
351 /* HTC-generated packets are freed here. */
352 if (htc_hdr && htc_hdr->endpoint_id != ENDPOINT0)
353 dev_kfree_skb_any(skb);
354 else
355 kfree_skb(skb);
356}
357
358/*
359 * HTC Messages are handled directly here and the obtained SKB
360 * is freed.
361 *
362 * Service messages (Data, WMI) are passed to the corresponding
363 * endpoint RX handlers, which have to free the SKB.
364 */
365void ath9k_htc_rx_msg(struct htc_target *htc_handle,
366 struct sk_buff *skb, u32 len, u8 pipe_id)
367{
368 struct htc_frame_hdr *htc_hdr;
369 enum htc_endpoint_id epid;
370 struct htc_endpoint *endpoint;
371 u16 *msg_id;
372
373 if (!htc_handle || !skb)
374 return;
375
376 htc_hdr = (struct htc_frame_hdr *) skb->data;
377 epid = htc_hdr->endpoint_id;
378
379 if (epid >= ENDPOINT_MAX) {
380 if (pipe_id != USB_REG_IN_PIPE)
381 dev_kfree_skb_any(skb);
382 else
383 kfree_skb(skb);
384 return;
385 }
386
387 if (epid == ENDPOINT0) {
388
389 /* Handle trailer */
390 if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER) {
391 if (be32_to_cpu(*(u32 *) skb->data) == 0x00C60000)
392 /* Move past the Watchdog pattern */
393 htc_hdr = (struct htc_frame_hdr *)(skb->data + 4);
394 }
395
396 /* Get the message ID */
397 msg_id = (u16 *) ((void *) htc_hdr +
398 sizeof(struct htc_frame_hdr));
399
400 /* Now process HTC messages */
401 switch (be16_to_cpu(*msg_id)) {
402 case HTC_MSG_READY_ID:
403 htc_process_target_rdy(htc_handle, htc_hdr);
404 break;
405 case HTC_MSG_CONNECT_SERVICE_RESPONSE_ID:
406 htc_process_conn_rsp(htc_handle, htc_hdr);
407 break;
408 default:
409 break;
410 }
411
412 kfree_skb(skb);
413
414 } else {
415 if (htc_hdr->flags & HTC_FLAGS_RECV_TRAILER)
416 skb_trim(skb, len - htc_hdr->control[0]);
417
418 skb_pull(skb, sizeof(struct htc_frame_hdr));
419
420 endpoint = &htc_handle->endpoint[epid];
421 if (endpoint->ep_callbacks.rx)
422 endpoint->ep_callbacks.rx(endpoint->ep_callbacks.priv,
423 skb, epid);
424 }
425}
426
427struct htc_target *ath9k_htc_hw_alloc(void *hif_handle)
428{
429 struct htc_target *target;
430
431 target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
432 if (!target)
433 printk(KERN_ERR "Unable to allocate memory for "
434 "target device\n");
435
436 return target;
437}
438
439void ath9k_htc_hw_free(struct htc_target *htc)
440{
441 kfree(htc);
442}
443
444int ath9k_htc_hw_init(struct ath9k_htc_hif *hif, struct htc_target *target,
445 void *hif_handle, struct device *dev, u16 devid,
446 enum ath9k_hif_transports transport)
447{
448 struct htc_endpoint *endpoint;
449 int err = 0;
450
451 init_completion(&target->target_wait);
452 init_completion(&target->cmd_wait);
453
454 target->hif = hif;
455 target->hif_dev = hif_handle;
456 target->dev = dev;
457
458 /* Assign control endpoint pipe IDs */
459 endpoint = &target->endpoint[ENDPOINT0];
460 endpoint->ul_pipeid = hif->control_ul_pipe;
461 endpoint->dl_pipeid = hif->control_dl_pipe;
462
463 err = ath9k_htc_probe_device(target, dev, devid);
464 if (err) {
465 printk(KERN_ERR "Failed to initialize the device\n");
466 return -ENODEV;
467 }
468
469 return 0;
470}
471
472void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug)
473{
474 if (target)
475 ath9k_htc_disconnect_device(target, hot_unplug);
476}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
new file mode 100644
index 000000000000..cd7048ffd239
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -0,0 +1,246 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef HTC_HST_H
18#define HTC_HST_H
19
20struct ath9k_htc_priv;
21struct htc_target;
22struct ath9k_htc_tx_ctl;
23
24enum ath9k_hif_transports {
25 ATH9K_HIF_USB,
26};
27
28struct ath9k_htc_hif {
29 struct list_head list;
30 const enum ath9k_hif_transports transport;
31 const char *name;
32
33 u8 control_dl_pipe;
34 u8 control_ul_pipe;
35
36 void (*start) (void *hif_handle, u8 pipe);
37 void (*stop) (void *hif_handle, u8 pipe);
38 int (*send) (void *hif_handle, u8 pipe, struct sk_buff *buf,
39 struct ath9k_htc_tx_ctl *tx_ctl);
40};
41
42enum htc_endpoint_id {
43 ENDPOINT_UNUSED = -1,
44 ENDPOINT0 = 0,
45 ENDPOINT1 = 1,
46 ENDPOINT2 = 2,
47 ENDPOINT3 = 3,
48 ENDPOINT4 = 4,
49 ENDPOINT5 = 5,
50 ENDPOINT6 = 6,
51 ENDPOINT7 = 7,
52 ENDPOINT8 = 8,
53 ENDPOINT_MAX = 22
54};
55
56/* Htc frame hdr flags */
57#define HTC_FLAGS_RECV_TRAILER (1 << 1)
58
59struct htc_frame_hdr {
60 u8 endpoint_id;
61 u8 flags;
62 u16 payload_len;
63 u8 control[4];
64} __packed;
65
66struct htc_ready_msg {
67 u16 message_id;
68 u16 credits;
69 u16 credit_size;
70 u8 max_endpoints;
71 u8 pad;
72} __packed;
73
74struct htc_config_pipe_msg {
75 u16 message_id;
76 u8 pipe_id;
77 u8 credits;
78} __packed;
79
80struct htc_packet {
81 void *pktcontext;
82 u8 *buf;
83 u8 *buf_payload;
84 u32 buflen;
85 u32 payload_len;
86
87 int endpoint;
88 int status;
89
90 void *context;
91 u32 reserved;
92};
93
94struct htc_ep_callbacks {
95 void *priv;
96 void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
97 void (*rx) (void *, struct sk_buff *, enum htc_endpoint_id);
98};
99
100#define HTC_TX_QUEUE_SIZE 256
101
102struct htc_txq {
103 struct sk_buff *buf[HTC_TX_QUEUE_SIZE];
104 u32 txqdepth;
105 u16 txbuf_cnt;
106 u16 txq_head;
107 u16 txq_tail;
108};
109
110struct htc_endpoint {
111 u16 service_id;
112
113 struct htc_ep_callbacks ep_callbacks;
114 struct htc_txq htc_txq;
115 u32 max_txqdepth;
116 int max_msglen;
117
118 u8 ul_pipeid;
119 u8 dl_pipeid;
120};
121
122#define HTC_MAX_CONTROL_MESSAGE_LENGTH 255
123#define HTC_CONTROL_BUFFER_SIZE \
124 (HTC_MAX_CONTROL_MESSAGE_LENGTH + sizeof(struct htc_frame_hdr))
125
126#define NUM_CONTROL_BUFFERS 8
127#define HST_ENDPOINT_MAX 8
128
129struct htc_control_buf {
130 struct htc_packet htc_pkt;
131 u8 buf[HTC_CONTROL_BUFFER_SIZE];
132};
133
134#define HTC_OP_START_WAIT BIT(0)
135#define HTC_OP_CONFIG_PIPE_CREDITS BIT(1)
136
137struct htc_target {
138 void *hif_dev;
139 struct ath9k_htc_priv *drv_priv;
140 struct device *dev;
141 struct ath9k_htc_hif *hif;
142 struct htc_endpoint endpoint[HST_ENDPOINT_MAX];
143 struct completion target_wait;
144 struct completion cmd_wait;
145 struct list_head list;
146 enum htc_endpoint_id conn_rsp_epid;
147 u16 credits;
148 u16 credit_size;
149 u8 htc_flags;
150};
151
152enum htc_msg_id {
153 HTC_MSG_READY_ID = 1,
154 HTC_MSG_CONNECT_SERVICE_ID,
155 HTC_MSG_CONNECT_SERVICE_RESPONSE_ID,
156 HTC_MSG_SETUP_COMPLETE_ID,
157 HTC_MSG_CONFIG_PIPE_ID,
158 HTC_MSG_CONFIG_PIPE_RESPONSE_ID,
159};
160
161struct htc_service_connreq {
162 u16 service_id;
163 u16 con_flags;
164 u32 max_send_qdepth;
165 struct htc_ep_callbacks ep_callbacks;
166};
167
168/* Current service IDs */
169
170enum htc_service_group_ids{
171 RSVD_SERVICE_GROUP = 0,
172 WMI_SERVICE_GROUP = 1,
173
174 HTC_SERVICE_GROUP_LAST = 255
175};
176
177#define MAKE_SERVICE_ID(group, index) \
178 (int)(((int)group << 8) | (int)(index))
179
180/* NOTE: service ID of 0x0000 is reserved and should never be used */
181#define HTC_CTRL_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 1)
182#define HTC_LOOPBACK_RSVD_SVC MAKE_SERVICE_ID(RSVD_SERVICE_GROUP, 2)
183
184#define WMI_CONTROL_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 0)
185#define WMI_BEACON_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 1)
186#define WMI_CAB_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 2)
187#define WMI_UAPSD_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 3)
188#define WMI_MGMT_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 4)
189#define WMI_DATA_VO_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 5)
190#define WMI_DATA_VI_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 6)
191#define WMI_DATA_BE_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 7)
192#define WMI_DATA_BK_SVC MAKE_SERVICE_ID(WMI_SERVICE_GROUP, 8)
193
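
MAKE_SERVICE_ID() packs the group into the high byte and the index into the low byte, so WMI_CONTROL_SVC works out to 0x0100 and WMI_DATA_BK_SVC to 0x0108. A quick check of the encoding together with hypothetical inverse macros (the inverses are added here for illustration and are not part of the driver):

#include <stdio.h>

#define MAKE_SERVICE_ID(group, index) \
	(int)(((int)(group) << 8) | (int)(index))

/* Inverse helpers, illustration only. */
#define SVC_GROUP(id)	(((id) >> 8) & 0xff)
#define SVC_INDEX(id)	((id) & 0xff)

int main(void)
{
	int id = MAKE_SERVICE_ID(1, 8);	/* WMI_DATA_BK_SVC */

	printf("id=0x%04x group=%d index=%d\n",
	       id, SVC_GROUP(id), SVC_INDEX(id));	/* 0x0108 1 8 */
	return 0;
}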
194struct htc_conn_svc_msg {
195 u16 msg_id;
196 u16 service_id;
197 u16 con_flags;
198 u8 dl_pipeid;
199 u8 ul_pipeid;
200 u8 svc_meta_len;
201 u8 pad;
202} __packed;
203
204/* connect response status codes */
205#define HTC_SERVICE_SUCCESS 0
206#define HTC_SERVICE_NOT_FOUND 1
207#define HTC_SERVICE_FAILED 2
208#define HTC_SERVICE_NO_RESOURCES 3
209#define HTC_SERVICE_NO_MORE_EP 4
210
211struct htc_conn_svc_rspmsg {
212 u16 msg_id;
213 u16 service_id;
214 u8 status;
215 u8 endpoint_id;
216 u16 max_msg_len;
217 u8 svc_meta_len;
218 u8 pad;
219} __packed;
220
221struct htc_comp_msg {
222 u16 msg_id;
223} __packed;
224
225int htc_init(struct htc_target *target);
226int htc_connect_service(struct htc_target *target,
227 struct htc_service_connreq *service_connreq,
228 enum htc_endpoint_id *conn_rsp_eid);
229int htc_send(struct htc_target *target, struct sk_buff *skb,
230 enum htc_endpoint_id eid, struct ath9k_htc_tx_ctl *tx_ctl);
231void htc_stop(struct htc_target *target);
232void htc_start(struct htc_target *target);
233
234void ath9k_htc_rx_msg(struct htc_target *htc_handle,
235 struct sk_buff *skb, u32 len, u8 pipe_id);
236void ath9k_htc_txcompletion_cb(struct htc_target *htc_handle,
237 struct sk_buff *skb, bool txok);
238
239struct htc_target *ath9k_htc_hw_alloc(void *hif_handle);
240void ath9k_htc_hw_free(struct htc_target *htc);
241int ath9k_htc_hw_init(struct ath9k_htc_hif *hif, struct htc_target *target,
242 void *hif_handle, struct device *dev, u16 devid,
243 enum ath9k_hif_transports transport);
244void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
245
246#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 78b571129c92..af730c7d50e6 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -28,9 +28,6 @@
28 28
29static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 29static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
30static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan); 30static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
31static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
32 struct ar5416_eeprom_def *pEepData,
33 u32 reg, u32 value);
34 31
35MODULE_AUTHOR("Atheros Communications"); 32MODULE_AUTHOR("Atheros Communications");
36MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards."); 33MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
@@ -500,8 +497,10 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
500{ 497{
501 int ecode; 498 int ecode;
502 499
503 if (!ath9k_hw_chip_test(ah)) 500 if (!AR_SREV_9271(ah)) {
504 return -ENODEV; 501 if (!ath9k_hw_chip_test(ah))
502 return -ENODEV;
503 }
505 504
506 ecode = ath9k_hw_rf_claim(ah); 505 ecode = ath9k_hw_rf_claim(ah);
507 if (ecode != 0) 506 if (ecode != 0)
@@ -546,7 +545,6 @@ static bool ath9k_hw_devid_supported(u16 devid)
546 case AR9285_DEVID_PCIE: 545 case AR9285_DEVID_PCIE:
547 case AR5416_DEVID_AR9287_PCI: 546 case AR5416_DEVID_AR9287_PCI:
548 case AR5416_DEVID_AR9287_PCIE: 547 case AR5416_DEVID_AR9287_PCIE:
549 case AR9271_USB:
550 case AR2427_DEVID_PCIE: 548 case AR2427_DEVID_PCIE:
551 return true; 549 return true;
552 default: 550 default:
@@ -604,9 +602,23 @@ static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
604 ARRAY_SIZE(ar9271Modes_9271), 6); 602 ARRAY_SIZE(ar9271Modes_9271), 6);
605 INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271, 603 INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
606 ARRAY_SIZE(ar9271Common_9271), 2); 604 ARRAY_SIZE(ar9271Common_9271), 2);
605 INIT_INI_ARRAY(&ah->iniCommon_normal_cck_fir_coeff_9271,
606 ar9271Common_normal_cck_fir_coeff_9271,
607 ARRAY_SIZE(ar9271Common_normal_cck_fir_coeff_9271), 2);
608 INIT_INI_ARRAY(&ah->iniCommon_japan_2484_cck_fir_coeff_9271,
609 ar9271Common_japan_2484_cck_fir_coeff_9271,
610 ARRAY_SIZE(ar9271Common_japan_2484_cck_fir_coeff_9271), 2);
607 INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only, 611 INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
608 ar9271Modes_9271_1_0_only, 612 ar9271Modes_9271_1_0_only,
609 ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6); 613 ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
614 INIT_INI_ARRAY(&ah->iniModes_9271_ANI_reg, ar9271Modes_9271_ANI_reg,
615 ARRAY_SIZE(ar9271Modes_9271_ANI_reg), 6);
616 INIT_INI_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
617 ar9271Modes_high_power_tx_gain_9271,
618 ARRAY_SIZE(ar9271Modes_high_power_tx_gain_9271), 6);
619 INIT_INI_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
620 ar9271Modes_normal_power_tx_gain_9271,
621 ARRAY_SIZE(ar9271Modes_normal_power_tx_gain_9271), 6);
610 return; 622 return;
611 } 623 }
612 624
@@ -801,38 +813,46 @@ static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
801 813
802 /* txgain table */ 814 /* txgain table */
803 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) { 815 if (txgain_type == AR5416_EEP_TXGAIN_HIGH_POWER) {
804 INIT_INI_ARRAY(&ah->iniModesTxGain, 816 if (AR_SREV_9285E_20(ah)) {
805 ar9285Modes_high_power_tx_gain_9285_1_2, 817 INIT_INI_ARRAY(&ah->iniModesTxGain,
806 ARRAY_SIZE(ar9285Modes_high_power_tx_gain_9285_1_2), 6); 818 ar9285Modes_XE2_0_high_power,
819 ARRAY_SIZE(
820 ar9285Modes_XE2_0_high_power), 6);
821 } else {
822 INIT_INI_ARRAY(&ah->iniModesTxGain,
823 ar9285Modes_high_power_tx_gain_9285_1_2,
824 ARRAY_SIZE(
825 ar9285Modes_high_power_tx_gain_9285_1_2), 6);
826 }
807 } else { 827 } else {
808 INIT_INI_ARRAY(&ah->iniModesTxGain, 828 if (AR_SREV_9285E_20(ah)) {
809 ar9285Modes_original_tx_gain_9285_1_2, 829 INIT_INI_ARRAY(&ah->iniModesTxGain,
810 ARRAY_SIZE(ar9285Modes_original_tx_gain_9285_1_2), 6); 830 ar9285Modes_XE2_0_normal_power,
831 ARRAY_SIZE(
832 ar9285Modes_XE2_0_normal_power), 6);
833 } else {
834 INIT_INI_ARRAY(&ah->iniModesTxGain,
835 ar9285Modes_original_tx_gain_9285_1_2,
836 ARRAY_SIZE(
837 ar9285Modes_original_tx_gain_9285_1_2), 6);
838 }
811 } 839 }
812
813 } 840 }
814} 841}
815 842
816static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah) 843static void ath9k_hw_init_eeprom_fix(struct ath_hw *ah)
817{ 844{
818 u32 i, j; 845 struct base_eep_header *pBase = &(ah->eeprom.def.baseEepHeader);
819 846 struct ath_common *common = ath9k_hw_common(ah);
820 if (ah->hw_version.devid == AR9280_DEVID_PCI) {
821
822 /* EEPROM Fixup */
823 for (i = 0; i < ah->iniModes.ia_rows; i++) {
824 u32 reg = INI_RA(&ah->iniModes, i, 0);
825 847
826 for (j = 1; j < ah->iniModes.ia_columns; j++) { 848 ah->need_an_top2_fixup = (ah->hw_version.devid == AR9280_DEVID_PCI) &&
827 u32 val = INI_RA(&ah->iniModes, i, j); 849 (ah->eep_map != EEP_MAP_4KBITS) &&
850 ((pBase->version & 0xff) > 0x0a) &&
851 (pBase->pwdclkind == 0);
828 852
829 INI_RA(&ah->iniModes, i, j) = 853 if (ah->need_an_top2_fixup)
830 ath9k_hw_ini_fixup(ah, 854 ath_print(common, ATH_DBG_EEPROM,
831 &ah->eeprom.def, 855 "needs fixup for AR_AN_TOP2 register\n");
832 reg, val);
833 }
834 }
835 }
836} 856}
837 857
838int ath9k_hw_init(struct ath_hw *ah) 858int ath9k_hw_init(struct ath_hw *ah)
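The rewritten ath9k_hw_init_eeprom_fix() above replaces the old per-row INI rewrite with a single precomputed predicate; the actual masking now happens when AR_AN_TOP2 is written in ath9k_hw_process_ini() (the -1468 hunk below). A standalone sketch of the two halves; the PWDCLKIND mask value here is illustrative, not taken from the headers:

#include <stdbool.h>
#include <stdint.h>

#define AR_AN_TOP2		0x7894      /* register address, per the old fixup code */
#define AR_AN_TOP2_PWDCLKIND	0x00400000  /* mask value illustrative only */

/* Computed once at init time (ah->need_an_top2_fixup in the patch). */
static bool an_top2_fixup_needed(bool is_ar9280_pci, bool is_4kbit_eeprom,
				 uint32_t eep_version, uint32_t pwdclkind)
{
	return is_ar9280_pci && !is_4kbit_eeprom &&
	       (eep_version & 0xff) > 0x0a && pwdclkind == 0;
}

/* Applied per register write, as ath9k_hw_process_ini() now does. */
static uint32_t ini_fixup(uint32_t reg, uint32_t val, bool need_fixup)
{
	if (reg == AR_AN_TOP2 && need_fixup)
		val &= ~AR_AN_TOP2_PWDCLKIND;
	return val;
}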
@@ -840,11 +860,13 @@ int ath9k_hw_init(struct ath_hw *ah)
840 struct ath_common *common = ath9k_hw_common(ah); 860 struct ath_common *common = ath9k_hw_common(ah);
841 int r = 0; 861 int r = 0;
842 862
843 if (!ath9k_hw_devid_supported(ah->hw_version.devid)) { 863 if (common->bus_ops->ath_bus_type != ATH_USB) {
844 ath_print(common, ATH_DBG_FATAL, 864 if (!ath9k_hw_devid_supported(ah->hw_version.devid)) {
845 "Unsupported device ID: 0x%0x\n", 865 ath_print(common, ATH_DBG_FATAL,
846 ah->hw_version.devid); 866 "Unsupported device ID: 0x%0x\n",
847 return -EOPNOTSUPP; 867 ah->hw_version.devid);
868 return -EOPNOTSUPP;
869 }
848 } 870 }
849 871
850 ath9k_hw_init_defaults(ah); 872 ath9k_hw_init_defaults(ah);
@@ -991,22 +1013,6 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
991 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); 1013 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
992} 1014}
993 1015
994static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud)
995{
996 u32 lcr;
997 u32 baud_divider = freq * 1000 * 1000 / 16 / baud;
998
999 lcr = REG_READ(ah , 0x5100c);
1000 lcr |= 0x80;
1001
1002 REG_WRITE(ah, 0x5100c, lcr);
1003 REG_WRITE(ah, 0x51004, (baud_divider >> 8));
1004 REG_WRITE(ah, 0x51000, (baud_divider & 0xff));
1005
1006 lcr &= ~0x80;
1007 REG_WRITE(ah, 0x5100c, lcr);
1008}
1009
1010static void ath9k_hw_init_pll(struct ath_hw *ah, 1016static void ath9k_hw_init_pll(struct ath_hw *ah,
1011 struct ath9k_channel *chan) 1017 struct ath9k_channel *chan)
1012{ 1018{
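The helper deleted in the hunk above implemented a standard 16x-oversampling UART divisor calculation against the core clock. A self-contained sketch of that arithmetic (the function name and example figures are illustrative, not kernel API):

#include <stdint.h>

/*
 * Sketch of the math the deleted ath9k_hw_change_target_baud() used:
 * a 16x-oversampled UART divides the source clock by 16 * baud, so
 * e.g. a 117 MHz core clock at 19200 baud yields a divider of ~380.
 */
static uint32_t uart_baud_divider(uint32_t freq_mhz, uint32_t baud)
{
	return freq_mhz * 1000 * 1000 / 16 / baud;
}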
@@ -1072,22 +1078,8 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
1072 1078
1073 /* Switch the core clock for ar9271 to 117 MHz */ 1079 /* Switch the core clock for ar9271 to 117 MHz */
1074 if (AR_SREV_9271(ah)) { 1080 if (AR_SREV_9271(ah)) {
1075 if ((pll == 0x142c) || (pll == 0x2850) ) { 1081 udelay(500);
1076 udelay(500); 1082 REG_WRITE(ah, 0x50040, 0x304);
1077 /* set CLKOBS to output AHB clock */
1078 REG_WRITE(ah, 0x7020, 0xe);
1079 /*
1080 * 0x304: 117 MHz, ahb_ratio: 1x1
1081 * 0x306: 40 MHz, ahb_ratio: 1x1
1082 */
1083 REG_WRITE(ah, 0x50040, 0x304);
1084 /*
1085 * makes adjustments to the baud divider to keep the
1086 * targeted baud rate based on the core clock in use.
1087 */
1088 ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK,
1089 AR9271_TARGET_BAUD_RATE);
1090 }
1091 } 1083 }
1092 1084
1093 udelay(RTC_PLL_SETTLE_DELAY); 1085 udelay(RTC_PLL_SETTLE_DELAY);
@@ -1135,24 +1127,25 @@ static void ath9k_hw_init_chain_masks(struct ath_hw *ah)
1135static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, 1127static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1136 enum nl80211_iftype opmode) 1128 enum nl80211_iftype opmode)
1137{ 1129{
1138 ah->mask_reg = AR_IMR_TXERR | 1130 u32 imr_reg = AR_IMR_TXERR |
1139 AR_IMR_TXURN | 1131 AR_IMR_TXURN |
1140 AR_IMR_RXERR | 1132 AR_IMR_RXERR |
1141 AR_IMR_RXORN | 1133 AR_IMR_RXORN |
1142 AR_IMR_BCNMISC; 1134 AR_IMR_BCNMISC;
1143 1135
1144 if (ah->config.rx_intr_mitigation) 1136 if (ah->config.rx_intr_mitigation)
1145 ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR; 1137 imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
1146 else 1138 else
1147 ah->mask_reg |= AR_IMR_RXOK; 1139 imr_reg |= AR_IMR_RXOK;
1148 1140
1149 ah->mask_reg |= AR_IMR_TXOK; 1141 imr_reg |= AR_IMR_TXOK;
1150 1142
1151 if (opmode == NL80211_IFTYPE_AP) 1143 if (opmode == NL80211_IFTYPE_AP)
1152 ah->mask_reg |= AR_IMR_MIB; 1144 imr_reg |= AR_IMR_MIB;
1153 1145
1154 REG_WRITE(ah, AR_IMR, ah->mask_reg); 1146 REG_WRITE(ah, AR_IMR, imr_reg);
1155 REG_WRITE(ah, AR_IMR_S2, REG_READ(ah, AR_IMR_S2) | AR_IMR_S2_GTT); 1147 ah->imrs2_reg |= AR_IMR_S2_GTT;
1148 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
1156 1149
1157 if (!AR_SREV_9100(ah)) { 1150 if (!AR_SREV_9100(ah)) {
1158 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF); 1151 REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
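The hunk above stops reading AR_IMR_S2 back from hardware and instead tracks it in a software shadow (ah->imrs2_reg), a pattern the later ath9k_hw_set_interrupts() and mac.c hunks follow as well. A minimal model of the idea, with names illustrative:

#include <stdint.h>

/*
 * Model of the shadow-register pattern introduced here: the last value
 * written to AR_IMR_S2 is cached in software and updated in place, so
 * the hardware register is never read back for a read-modify-write.
 */
struct imr_shadow {
	uint32_t imrs2_reg;	/* software copy of AR_IMR_S2 */
};

/* Stand-in for REG_WRITE(ah, AR_IMR_S2, val). */
static void write_imr_s2(uint32_t val) { (void)val; }

static void imr_s2_update(struct imr_shadow *hw, uint32_t clr, uint32_t set)
{
	hw->imrs2_reg &= ~clr;	/* drop stale bits */
	hw->imrs2_reg |= set;	/* merge new bits */
	write_imr_s2(hw->imrs2_reg);
}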
@@ -1241,7 +1234,7 @@ void ath9k_hw_deinit(struct ath_hw *ah)
1241{ 1234{
1242 struct ath_common *common = ath9k_hw_common(ah); 1235 struct ath_common *common = ath9k_hw_common(ah);
1243 1236
1244 if (common->state <= ATH_HW_INITIALIZED) 1237 if (common->state < ATH_HW_INITIALIZED)
1245 goto free_hw; 1238 goto free_hw;
1246 1239
1247 if (!AR_SREV_9100(ah)) 1240 if (!AR_SREV_9100(ah))
@@ -1252,8 +1245,6 @@ void ath9k_hw_deinit(struct ath_hw *ah)
1252free_hw: 1245free_hw:
1253 if (!AR_SREV_9280_10_OR_LATER(ah)) 1246 if (!AR_SREV_9280_10_OR_LATER(ah))
1254 ath9k_hw_rf_free_ext_banks(ah); 1247 ath9k_hw_rf_free_ext_banks(ah);
1255 kfree(ah);
1256 ah = NULL;
1257} 1248}
1258EXPORT_SYMBOL(ath9k_hw_deinit); 1249EXPORT_SYMBOL(ath9k_hw_deinit);
1259 1250
@@ -1266,26 +1257,6 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
1266{ 1257{
1267 u32 val; 1258 u32 val;
1268 1259
1269 if (AR_SREV_9271(ah)) {
1270 /*
1271 * Enable spectral scan as a workaround for issues with stuck
1272 * beacons on AR9271 1.0. The stuck-beacon issue is not seen on
1273 * AR9271 1.1
1274 */
1275 if (AR_SREV_9271_10(ah)) {
1276 val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) |
1277 AR_PHY_SPECTRAL_SCAN_ENABLE;
1278 REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val);
1279 }
1280 else if (AR_SREV_9271_11(ah))
1281 /*
1282 * change AR_PHY_RF_CTL3 setting to fix MAC issue
1283 * present on AR9271 1.1
1284 */
1285 REG_WRITE(ah, AR_PHY_RF_CTL3, 0x3a020001);
1286 return;
1287 }
1288
1289 /* 1260 /*
1290 * Set the RX_ABORT and RX_DIS bits and clear them only after 1261
1291 * RXE is set for the MAC. This prevents frames with corrupted 1262
@@ -1294,8 +1265,10 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
1294 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 1265 REG_SET_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
1295 1266
1296 if (AR_SREV_9280_10_OR_LATER(ah)) { 1267 if (AR_SREV_9280_10_OR_LATER(ah)) {
1297 val = REG_READ(ah, AR_PCU_MISC_MODE2) & 1268 val = REG_READ(ah, AR_PCU_MISC_MODE2);
1298 (~AR_PCU_MISC_MODE2_HWWAR1); 1269
1270 if (!AR_SREV_9271(ah))
1271 val &= ~AR_PCU_MISC_MODE2_HWWAR1;
1299 1272
1300 if (AR_SREV_9287_10_OR_LATER(ah)) 1273 if (AR_SREV_9287_10_OR_LATER(ah))
1301 val = val & (~AR_PCU_MISC_MODE2_HWWAR2); 1274 val = val & (~AR_PCU_MISC_MODE2_HWWAR2);
@@ -1323,51 +1296,6 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
1323 } 1296 }
1324} 1297}
1325 1298
1326static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
1327 struct ar5416_eeprom_def *pEepData,
1328 u32 reg, u32 value)
1329{
1330 struct base_eep_header *pBase = &(pEepData->baseEepHeader);
1331 struct ath_common *common = ath9k_hw_common(ah);
1332
1333 switch (ah->hw_version.devid) {
1334 case AR9280_DEVID_PCI:
1335 if (reg == 0x7894) {
1336 ath_print(common, ATH_DBG_EEPROM,
1337 "ini VAL: %x EEPROM: %x\n", value,
1338 (pBase->version & 0xff));
1339
1340 if ((pBase->version & 0xff) > 0x0a) {
1341 ath_print(common, ATH_DBG_EEPROM,
1342 "PWDCLKIND: %d\n",
1343 pBase->pwdclkind);
1344 value &= ~AR_AN_TOP2_PWDCLKIND;
1345 value |= AR_AN_TOP2_PWDCLKIND &
1346 (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
1347 } else {
1348 ath_print(common, ATH_DBG_EEPROM,
1349 "PWDCLKIND Earlier Rev\n");
1350 }
1351
1352 ath_print(common, ATH_DBG_EEPROM,
1353 "final ini VAL: %x\n", value);
1354 }
1355 break;
1356 }
1357
1358 return value;
1359}
1360
1361static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
1362 struct ar5416_eeprom_def *pEepData,
1363 u32 reg, u32 value)
1364{
1365 if (ah->eep_map == EEP_MAP_4KBITS)
1366 return value;
1367 else
1368 return ath9k_hw_def_ini_fixup(ah, pEepData, reg, value);
1369}
1370
1371static void ath9k_olc_init(struct ath_hw *ah) 1299static void ath9k_olc_init(struct ath_hw *ah)
1372{ 1300{
1373 u32 i; 1301 u32 i;
@@ -1439,7 +1367,10 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1439 return -EINVAL; 1367 return -EINVAL;
1440 } 1368 }
1441 1369
1370 /* Set the correct baseband-to-analog shift setting to access the analog chips */
1442 REG_WRITE(ah, AR_PHY(0), 0x00000007); 1371 REG_WRITE(ah, AR_PHY(0), 0x00000007);
1372
1373 /* Write ADDAC shifts */
1443 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO); 1374 REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_EXTERNAL_RADIO);
1444 ah->eep_ops->set_addac(ah, chan); 1375 ah->eep_ops->set_addac(ah, chan);
1445 1376
@@ -1451,9 +1382,11 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1451 sizeof(u32) * ah->iniAddac.ia_rows * 1382 sizeof(u32) * ah->iniAddac.ia_rows *
1452 ah->iniAddac.ia_columns; 1383 ah->iniAddac.ia_columns;
1453 1384
1385 /* For AR5416 2.0/2.1 */
1454 memcpy(ah->addac5416_21, 1386 memcpy(ah->addac5416_21,
1455 ah->iniAddac.ia_array, addacSize); 1387 ah->iniAddac.ia_array, addacSize);
1456 1388
1389 /* override CLKDRV value at [row, column] = [31, 1] */
1457 (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0; 1390 (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
1458 1391
1459 temp.ia_array = ah->addac5416_21; 1392 temp.ia_array = ah->addac5416_21;
@@ -1468,6 +1401,9 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1468 u32 reg = INI_RA(&ah->iniModes, i, 0); 1401 u32 reg = INI_RA(&ah->iniModes, i, 0);
1469 u32 val = INI_RA(&ah->iniModes, i, modesIndex); 1402 u32 val = INI_RA(&ah->iniModes, i, modesIndex);
1470 1403
1404 if (reg == AR_AN_TOP2 && ah->need_an_top2_fixup)
1405 val &= ~AR_AN_TOP2_PWDCLKIND;
1406
1471 REG_WRITE(ah, reg, val); 1407 REG_WRITE(ah, reg, val);
1472 1408
1473 if (reg >= 0x7800 && reg < 0x78a0 1409 if (reg >= 0x7800 && reg < 0x78a0
@@ -1485,6 +1421,11 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1485 AR_SREV_9287_10_OR_LATER(ah)) 1421 AR_SREV_9287_10_OR_LATER(ah))
1486 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites); 1422 REG_WRITE_ARRAY(&ah->iniModesTxGain, modesIndex, regWrites);
1487 1423
1424 if (AR_SREV_9271_10(ah))
1425 REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
1426 modesIndex, regWrites);
1427
1428 /* Write common array parameters */
1488 for (i = 0; i < ah->iniCommon.ia_rows; i++) { 1429 for (i = 0; i < ah->iniCommon.ia_rows; i++) {
1489 u32 reg = INI_RA(&ah->iniCommon, i, 0); 1430 u32 reg = INI_RA(&ah->iniCommon, i, 0);
1490 u32 val = INI_RA(&ah->iniCommon, i, 1); 1431 u32 val = INI_RA(&ah->iniCommon, i, 1);
@@ -1499,11 +1440,16 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1499 DO_DELAY(regWrites); 1440 DO_DELAY(regWrites);
1500 } 1441 }
1501 1442
1502 ath9k_hw_write_regs(ah, freqIndex, regWrites); 1443 if (AR_SREV_9271(ah)) {
1444 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 1)
1445 REG_WRITE_ARRAY(&ah->iniModes_high_power_tx_gain_9271,
1446 modesIndex, regWrites);
1447 else
1448 REG_WRITE_ARRAY(&ah->iniModes_normal_power_tx_gain_9271,
1449 modesIndex, regWrites);
1450 }
1503 1451
1504 if (AR_SREV_9271_10(ah)) 1452 ath9k_hw_write_regs(ah, freqIndex, regWrites);
1505 REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
1506 modesIndex, regWrites);
1507 1453
1508 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) { 1454 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
1509 REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex, 1455 REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
@@ -1517,6 +1463,7 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1517 if (OLC_FOR_AR9280_20_LATER) 1463 if (OLC_FOR_AR9280_20_LATER)
1518 ath9k_olc_init(ah); 1464 ath9k_olc_init(ah);
1519 1465
1466 /* Set TX power */
1520 ah->eep_ops->set_txpower(ah, chan, 1467 ah->eep_ops->set_txpower(ah, chan,
1521 ath9k_regd_get_ctl(regulatory, chan), 1468 ath9k_regd_get_ctl(regulatory, chan),
1522 channel->max_antenna_gain * 2, 1469 channel->max_antenna_gain * 2,
@@ -1524,6 +1471,7 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1524 min((u32) MAX_RATE_POWER, 1471 min((u32) MAX_RATE_POWER,
1525 (u32) regulatory->power_limit)); 1472 (u32) regulatory->power_limit));
1526 1473
1474 /* Write analog registers */
1527 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { 1475 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
1528 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 1476 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1529 "ar5416SetRfRegs failed\n"); 1477 "ar5416SetRfRegs failed\n");
@@ -1966,6 +1914,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1966 1914
1967 ath9k_hw_mark_phy_inactive(ah); 1915 ath9k_hw_mark_phy_inactive(ah);
1968 1916
1917 /* Only required on the first reset */
1969 if (AR_SREV_9271(ah) && ah->htc_reset_init) { 1918 if (AR_SREV_9271(ah) && ah->htc_reset_init) {
1970 REG_WRITE(ah, 1919 REG_WRITE(ah,
1971 AR9271_RESET_POWER_DOWN_CONTROL, 1920 AR9271_RESET_POWER_DOWN_CONTROL,
@@ -1978,6 +1927,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1978 return -EINVAL; 1927 return -EINVAL;
1979 } 1928 }
1980 1929
1930 /* Only required on the first reset */
1981 if (AR_SREV_9271(ah) && ah->htc_reset_init) { 1931 if (AR_SREV_9271(ah) && ah->htc_reset_init) {
1982 ah->htc_reset_init = false; 1932 ah->htc_reset_init = false;
1983 REG_WRITE(ah, 1933 REG_WRITE(ah,
@@ -2438,7 +2388,7 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
2438 if (!AR_SREV_9100(ah)) 2388 if (!AR_SREV_9100(ah))
2439 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); 2389 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
2440 2390
2441 if(!AR_SREV_5416(ah)) 2391 if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah))
2442 REG_CLR_BIT(ah, (AR_RTC_RESET), 2392 REG_CLR_BIT(ah, (AR_RTC_RESET),
2443 AR_RTC_RESET_EN); 2393 AR_RTC_RESET_EN);
2444 } 2394 }
@@ -2854,7 +2804,7 @@ EXPORT_SYMBOL(ath9k_hw_getisr);
2854 2804
2855enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) 2805enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
2856{ 2806{
2857 u32 omask = ah->mask_reg; 2807 enum ath9k_int omask = ah->imask;
2858 u32 mask, mask2; 2808 u32 mask, mask2;
2859 struct ath9k_hw_capabilities *pCap = &ah->caps; 2809 struct ath9k_hw_capabilities *pCap = &ah->caps;
2860 struct ath_common *common = ath9k_hw_common(ah); 2810 struct ath_common *common = ath9k_hw_common(ah);
@@ -2921,15 +2871,11 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
2921 2871
2922 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask); 2872 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
2923 REG_WRITE(ah, AR_IMR, mask); 2873 REG_WRITE(ah, AR_IMR, mask);
2924 mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM | 2874 ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
2925 AR_IMR_S2_DTIM | 2875 AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
2926 AR_IMR_S2_DTIMSYNC | 2876 AR_IMR_S2_TSFOOR | AR_IMR_S2_GTT | AR_IMR_S2_CST);
2927 AR_IMR_S2_CABEND | 2877 ah->imrs2_reg |= mask2;
2928 AR_IMR_S2_CABTO | 2878 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
2929 AR_IMR_S2_TSFOOR |
2930 AR_IMR_S2_GTT | AR_IMR_S2_CST);
2931 REG_WRITE(ah, AR_IMR_S2, mask | mask2);
2932 ah->mask_reg = ints;
2933 2879
2934 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 2880 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
2935 if (ints & ATH9K_INT_TIM_TIMER) 2881 if (ints & ATH9K_INT_TIM_TIMER)
@@ -3219,7 +3165,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
3219 else 3165 else
3220 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD; 3166 pCap->tx_triglevel_max = MAX_TX_FIFO_THRESHOLD;
3221 3167
3222 if (AR_SREV_9285_10_OR_LATER(ah)) 3168 if (AR_SREV_9271(ah))
3169 pCap->num_gpio_pins = AR9271_NUM_GPIO;
3170 else if (AR_SREV_9285_10_OR_LATER(ah))
3223 pCap->num_gpio_pins = AR9285_NUM_GPIO; 3171 pCap->num_gpio_pins = AR9285_NUM_GPIO;
3224 else if (AR_SREV_9280_10_OR_LATER(ah)) 3172 else if (AR_SREV_9280_10_OR_LATER(ah))
3225 pCap->num_gpio_pins = AR928X_NUM_GPIO; 3173 pCap->num_gpio_pins = AR928X_NUM_GPIO;
@@ -3246,8 +3194,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
3246 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT; 3194 pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
3247 } 3195 }
3248#endif 3196#endif
3249 3197 if (AR_SREV_9271(ah))
3250 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP; 3198 pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
3199 else
3200 pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
3251 3201
3252 if (AR_SREV_9280(ah) || AR_SREV_9285(ah)) 3202 if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
3253 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS; 3203 pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
@@ -3455,7 +3405,9 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
3455 if (gpio >= ah->caps.num_gpio_pins) 3405 if (gpio >= ah->caps.num_gpio_pins)
3456 return 0xffffffff; 3406 return 0xffffffff;
3457 3407
3458 if (AR_SREV_9287_10_OR_LATER(ah)) 3408 if (AR_SREV_9271(ah))
3409 return MS_REG_READ(AR9271, gpio) != 0;
3410 else if (AR_SREV_9287_10_OR_LATER(ah))
3459 return MS_REG_READ(AR9287, gpio) != 0; 3411 return MS_REG_READ(AR9287, gpio) != 0;
3460 else if (AR_SREV_9285_10_OR_LATER(ah)) 3412 else if (AR_SREV_9285_10_OR_LATER(ah))
3461 return MS_REG_READ(AR9285, gpio) != 0; 3413 return MS_REG_READ(AR9285, gpio) != 0;
@@ -3484,6 +3436,9 @@ EXPORT_SYMBOL(ath9k_hw_cfg_output);
3484 3436
3485void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) 3437void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
3486{ 3438{
3439 if (AR_SREV_9271(ah))
3440 val = ~val;
3441
3487 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), 3442 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
3488 AR_GPIO_BIT(gpio)); 3443 AR_GPIO_BIT(gpio));
3489} 3444}
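The set_gpio hunk above inverts the output value on AR9271 before the bit is placed, which suggests the line is driven active-low on that chip. A minimal model of the computation (illustrative, not driver code):

#include <stdbool.h>
#include <stdint.h>

/* Models the (val & 1) << gpio computation from ath9k_hw_set_gpio(),
 * with the AR9271 inversion applied first. */
static uint32_t gpio_out_bit(uint32_t gpio, uint32_t val, bool ar9271)
{
	if (ar9271)
		val = ~val;	/* AR9271 output appears to be active-low */
	return (val & 1) << gpio;
}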
@@ -3868,6 +3823,16 @@ void ath_gen_timer_isr(struct ath_hw *ah)
3868} 3823}
3869EXPORT_SYMBOL(ath_gen_timer_isr); 3824EXPORT_SYMBOL(ath_gen_timer_isr);
3870 3825
3826/********/
3827/* HTC */
3828/********/
3829
3830void ath9k_hw_htc_resetinit(struct ath_hw *ah)
3831{
3832 ah->htc_reset_init = true;
3833}
3834EXPORT_SYMBOL(ath9k_hw_htc_resetinit);
3835
3871static struct { 3836static struct {
3872 u32 version; 3837 u32 version;
3873 const char * name; 3838 const char * name;
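The new HTC section only arms a one-shot flag; the two ath9k_hw_reset() hunks earlier consume it on AR9271 so the power-down sequence runs on the first reset only. A minimal standalone model of that behaviour (not driver code; the print statements merely mark which path is taken):

#include <stdbool.h>
#include <stdio.h>

struct hw_model { bool htc_reset_init; };

/* Mirrors ath9k_hw_htc_resetinit(): arm the one-shot flag. */
static void htc_resetinit(struct hw_model *ah) { ah->htc_reset_init = true; }

/* Mirrors the AR9271 branches in ath9k_hw_reset(). */
static void hw_reset(struct hw_model *ah)
{
	if (ah->htc_reset_init) {
		ah->htc_reset_init = false;
		puts("first reset: full power-down sequence");
	} else {
		puts("later resets: sequence skipped");
	}
}

int main(void)
{
	struct hw_model ah = { .htc_reset_init = false };
	htc_resetinit(&ah);
	hw_reset(&ah);	/* runs the sequence */
	hw_reset(&ah);	/* skips it */
	return 0;
}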
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index dbbf7ca5f97d..f4821cf33b87 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -44,8 +44,6 @@
44 44
45#define AR5416_AR9100_DEVID 0x000b 45#define AR5416_AR9100_DEVID 0x000b
46 46
47#define AR9271_USB 0x9271
48
49#define AR_SUBVENDOR_ID_NOG 0x0e11 47#define AR_SUBVENDOR_ID_NOG 0x0e11
50#define AR_SUBVENDOR_ID_NEW_A 0x7065 48#define AR_SUBVENDOR_ID_NEW_A 0x7065
51#define AR5416_MAGIC 0x19641014 49#define AR5416_MAGIC 0x19641014
@@ -461,6 +459,7 @@ struct ath_hw {
461 459
462 bool sw_mgmt_crypto; 460 bool sw_mgmt_crypto;
463 bool is_pciexpress; 461 bool is_pciexpress;
462 bool need_an_top2_fixup;
464 u16 tx_trig_level; 463 u16 tx_trig_level;
465 u16 rfsilent; 464 u16 rfsilent;
466 u32 rfkill_gpio; 465 u32 rfkill_gpio;
@@ -478,7 +477,8 @@ struct ath_hw {
478 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES]; 477 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
479 478
480 int16_t curchan_rad_index; 479 int16_t curchan_rad_index;
481 u32 mask_reg; 480 enum ath9k_int imask;
481 u32 imrs2_reg;
482 u32 txok_interrupt_mask; 482 u32 txok_interrupt_mask;
483 u32 txerr_interrupt_mask; 483 u32 txerr_interrupt_mask;
484 u32 txdesc_interrupt_mask; 484 u32 txdesc_interrupt_mask;
@@ -598,6 +598,11 @@ struct ath_hw {
598 struct ar5416IniArray iniModes_9271_1_0_only; 598 struct ar5416IniArray iniModes_9271_1_0_only;
599 struct ar5416IniArray iniCckfirNormal; 599 struct ar5416IniArray iniCckfirNormal;
600 struct ar5416IniArray iniCckfirJapan2484; 600 struct ar5416IniArray iniCckfirJapan2484;
601 struct ar5416IniArray iniCommon_normal_cck_fir_coeff_9271;
602 struct ar5416IniArray iniCommon_japan_2484_cck_fir_coeff_9271;
603 struct ar5416IniArray iniModes_9271_ANI_reg;
604 struct ar5416IniArray iniModes_high_power_tx_gain_9271;
605 struct ar5416IniArray iniModes_normal_power_tx_gain_9271;
601 606
602 u32 intr_gen_timer_trigger; 607 u32 intr_gen_timer_trigger;
603 u32 intr_gen_timer_thresh; 608 u32 intr_gen_timer_thresh;
@@ -701,6 +706,9 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah);
701 706
702void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len); 707void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
703 708
709/* HTC */
710void ath9k_hw_htc_resetinit(struct ath_hw *ah);
711
704#define ATH_PCIE_CAP_LINK_CTRL 0x70 712#define ATH_PCIE_CAP_LINK_CTRL 0x70
705#define ATH_PCIE_CAP_LINK_L0S 1 713#define ATH_PCIE_CAP_LINK_L0S 1
706#define ATH_PCIE_CAP_LINK_L1 2 714#define ATH_PCIE_CAP_LINK_L1 2
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 3d4d897add6d..b78308c3c4d4 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -760,6 +760,9 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
760 760
761 tasklet_kill(&sc->intr_tq); 761 tasklet_kill(&sc->intr_tq);
762 tasklet_kill(&sc->bcon_tasklet); 762 tasklet_kill(&sc->bcon_tasklet);
763
764 kfree(sc->sc_ah);
765 sc->sc_ah = NULL;
763} 766}
764 767
765void ath9k_deinit_device(struct ath_softc *sc) 768void ath9k_deinit_device(struct ath_softc *sc)
diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/initvals.h
index 8a3bf3ab998d..455e9d3b3f13 100644
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/initvals.h
@@ -4184,7 +4184,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4184 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 4184 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
4185 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 4185 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
4186 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 4186 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
4187 { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, 4187 { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
4188 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 4188 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
4189 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 4189 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
4190 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 4190 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4198,8 +4198,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4198 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 4198 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
4199 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 4199 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
4200 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 4200 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
4201 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 4201 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
4202 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 4202 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
4203 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 4203 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
4204 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 4204 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
4205 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 4205 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4312,7 +4312,7 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4312 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 4312 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
4313 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 4313 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
4314 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 4314 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
4315 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, 4315 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
4316 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 4316 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
4317 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 4317 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
4318 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 4318 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -4326,8 +4326,8 @@ static const u_int32_t ar9285Modes_9285_1_2[][6] = {
4326 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 4326 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
4327 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 4327 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
4328 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 4328 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
4329 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 4329 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
4330 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 4330 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
4331 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 4331 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
4332 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 4332 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
4333 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 4333 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -4731,17 +4731,12 @@ static const u_int32_t ar9285Common_9285_1_2[][2] = {
4731 { 0x00007808, 0x54214514 }, 4731 { 0x00007808, 0x54214514 },
4732 { 0x0000780c, 0x02025830 }, 4732 { 0x0000780c, 0x02025830 },
4733 { 0x00007810, 0x71c0d388 }, 4733 { 0x00007810, 0x71c0d388 },
4734 { 0x00007814, 0x924934a8 },
4735 { 0x0000781c, 0x00000000 }, 4734 { 0x0000781c, 0x00000000 },
4736 { 0x00007824, 0x00d86fff }, 4735 { 0x00007824, 0x00d86fff },
4737 { 0x00007828, 0x26d2491b },
4738 { 0x0000782c, 0x6e36d97b }, 4736 { 0x0000782c, 0x6e36d97b },
4739 { 0x00007830, 0xedb6d96e },
4740 { 0x00007834, 0x71400087 }, 4737 { 0x00007834, 0x71400087 },
4741 { 0x0000783c, 0x0001fffe },
4742 { 0x00007840, 0xffeb1a20 },
4743 { 0x00007844, 0x000c0db6 }, 4738 { 0x00007844, 0x000c0db6 },
4744 { 0x00007848, 0x6db61b6f }, 4739 { 0x00007848, 0x6db6246f },
4745 { 0x0000784c, 0x6d9b66db }, 4740 { 0x0000784c, 0x6d9b66db },
4746 { 0x00007850, 0x6d8c6dba }, 4741 { 0x00007850, 0x6d8c6dba },
4747 { 0x00007854, 0x00040000 }, 4742 { 0x00007854, 0x00040000 },
@@ -4777,7 +4772,12 @@ static const u_int32_t ar9285Modes_high_power_tx_gain_9285_1_2[][6] = {
4777 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 4772 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4778 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 4773 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4779 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 4774 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4775 { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
4776 { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
4777 { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
4780 { 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 }, 4778 { 0x00007838, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803, 0xfac68803 },
4779 { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
4780 { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
4781 { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe }, 4781 { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
4782 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 }, 4782 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
4783 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 }, 4783 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
@@ -4813,7 +4813,12 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
4813 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 4813 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4814 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 4814 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4815 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 }, 4815 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4816 { 0x00007814, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8, 0x924934a8 },
4817 { 0x00007828, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b, 0x26d2491b },
4818 { 0x00007830, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e, 0xedb6d96e },
4816 { 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 }, 4819 { 0x00007838, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801, 0xfac68801 },
4820 { 0x0000783c, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe, 0x0001fffe },
4821 { 0x00007840, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20, 0xffeb1a20 },
4817 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 }, 4822 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
4818 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 }, 4823 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
4819 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 }, 4824 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
@@ -4825,6 +4830,86 @@ static const u_int32_t ar9285Modes_original_tx_gain_9285_1_2[][6] = {
4825 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c }, 4830 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
4826}; 4831};
4827 4832
4833static const u_int32_t ar9285Modes_XE2_0_normal_power[][6] = {
4834 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4835 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
4836 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
4837 { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
4838 { 0x0000a310, 0x00000000, 0x00000000, 0x00022618, 0x00022618, 0x00000000 },
4839 { 0x0000a314, 0x00000000, 0x00000000, 0x0002a6c9, 0x0002a6c9, 0x00000000 },
4840 { 0x0000a318, 0x00000000, 0x00000000, 0x00031710, 0x00031710, 0x00000000 },
4841 { 0x0000a31c, 0x00000000, 0x00000000, 0x00035718, 0x00035718, 0x00000000 },
4842 { 0x0000a320, 0x00000000, 0x00000000, 0x00038758, 0x00038758, 0x00000000 },
4843 { 0x0000a324, 0x00000000, 0x00000000, 0x0003c75a, 0x0003c75a, 0x00000000 },
4844 { 0x0000a328, 0x00000000, 0x00000000, 0x0004075c, 0x0004075c, 0x00000000 },
4845 { 0x0000a32c, 0x00000000, 0x00000000, 0x0004475e, 0x0004475e, 0x00000000 },
4846 { 0x0000a330, 0x00000000, 0x00000000, 0x0004679f, 0x0004679f, 0x00000000 },
4847 { 0x0000a334, 0x00000000, 0x00000000, 0x000487df, 0x000487df, 0x00000000 },
4848 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
4849 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
4850 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4851 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4852 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4853 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4854 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4855 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4856 { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
4857 { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
4858 { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6dbae },
4859 { 0x00007838, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441, 0xdac71441 },
4860 { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
4861 { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
4862 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
4863 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
4864 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a21a652, 0x0a21a652, 0x0a22a652 },
4865 { 0x0000a278, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
4866 { 0x0000a27c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c, 0x050e039c },
4867 { 0x0000a394, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
4868 { 0x0000a398, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
4869 { 0x0000a3dc, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c, 0x39ce739c },
4870 { 0x0000a3e0, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c, 0x0000039c },
4871};
4872
4873static const u_int32_t ar9285Modes_XE2_0_high_power[][6] = {
4874 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
4875 { 0x0000a304, 0x00000000, 0x00000000, 0x00006200, 0x00006200, 0x00000000 },
4876 { 0x0000a308, 0x00000000, 0x00000000, 0x00008201, 0x00008201, 0x00000000 },
4877 { 0x0000a30c, 0x00000000, 0x00000000, 0x0000b240, 0x0000b240, 0x00000000 },
4878 { 0x0000a310, 0x00000000, 0x00000000, 0x0000d241, 0x0000d241, 0x00000000 },
4879 { 0x0000a314, 0x00000000, 0x00000000, 0x0000f600, 0x0000f600, 0x00000000 },
4880 { 0x0000a318, 0x00000000, 0x00000000, 0x00012800, 0x00012800, 0x00000000 },
4881 { 0x0000a31c, 0x00000000, 0x00000000, 0x00016802, 0x00016802, 0x00000000 },
4882 { 0x0000a320, 0x00000000, 0x00000000, 0x0001b805, 0x0001b805, 0x00000000 },
4883 { 0x0000a324, 0x00000000, 0x00000000, 0x00021a80, 0x00021a80, 0x00000000 },
4884 { 0x0000a328, 0x00000000, 0x00000000, 0x00028b00, 0x00028b00, 0x00000000 },
4885 { 0x0000a32c, 0x00000000, 0x00000000, 0x0002ab40, 0x0002ab40, 0x00000000 },
4886 { 0x0000a330, 0x00000000, 0x00000000, 0x0002cd80, 0x0002cd80, 0x00000000 },
4887 { 0x0000a334, 0x00000000, 0x00000000, 0x00033d82, 0x00033d82, 0x00000000 },
4888 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
4889 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
4890 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4891 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4892 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4893 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4894 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4895 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
4896 { 0x00007814, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8, 0x92497ca8 },
4897 { 0x00007828, 0x4ad2491b, 0x4ad2491b, 0x2ad2491b, 0x4ad2491b, 0x4ad2491b },
4898 { 0x00007830, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e, 0xedb6da6e },
4899 { 0x00007838, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443, 0xdac71443 },
4900 { 0x0000783c, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe, 0x2481f6fe },
4901 { 0x00007840, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c, 0xba5f638c },
4902 { 0x0000786c, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe, 0x08609ebe },
4903 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
4904 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a216652, 0x0a216652, 0x0a22a652 },
4905 { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
4906 { 0x0000a27c, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7, 0x050380e7 },
4907 { 0x0000a394, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
4908 { 0x0000a398, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
4909 { 0x0000a3dc, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
4910 { 0x0000a3e0, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7, 0x000000e7 },
4911};
4912
4828static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = { 4913static const u_int32_t ar9285PciePhy_clkreq_always_on_L1_9285_1_2[][2] = {
4829 {0x00004040, 0x9248fd00 }, 4914 {0x00004040, 0x9248fd00 },
4830 {0x00004040, 0x24924924 }, 4915 {0x00004040, 0x24924924 },
@@ -6441,7 +6526,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6441 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 6526 { 0x00009a44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
6442 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 6527 { 0x00009a48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
6443 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 6528 { 0x00009a4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
6444 { 0x00009a50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, 6529 { 0x00009a50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
6445 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 6530 { 0x00009a54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
6446 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 6531 { 0x00009a58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
6447 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 6532 { 0x00009a5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6455,8 +6540,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6455 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 6540 { 0x00009a7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
6456 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 6541 { 0x00009a80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
6457 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 6542 { 0x00009a84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
6458 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 6543 { 0x00009a88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
6459 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 6544 { 0x00009a8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
6460 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 6545 { 0x00009a90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
6461 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 6546 { 0x00009a94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
6462 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 6547 { 0x00009a98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6569,7 +6654,7 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6569 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 }, 6654 { 0x0000aa44, 0x00000000, 0x00000000, 0x000581a8, 0x000581a8, 0x00000000 },
6570 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 }, 6655 { 0x0000aa48, 0x00000000, 0x00000000, 0x00058284, 0x00058284, 0x00000000 },
6571 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 }, 6656 { 0x0000aa4c, 0x00000000, 0x00000000, 0x00058288, 0x00058288, 0x00000000 },
6572 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058220, 0x00058220, 0x00000000 }, 6657 { 0x0000aa50, 0x00000000, 0x00000000, 0x00058224, 0x00058224, 0x00000000 },
6573 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 }, 6658 { 0x0000aa54, 0x00000000, 0x00000000, 0x00058290, 0x00058290, 0x00000000 },
6574 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 }, 6659 { 0x0000aa58, 0x00000000, 0x00000000, 0x00058300, 0x00058300, 0x00000000 },
6575 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 }, 6660 { 0x0000aa5c, 0x00000000, 0x00000000, 0x00058304, 0x00058304, 0x00000000 },
@@ -6583,8 +6668,8 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6583 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 }, 6668 { 0x0000aa7c, 0x00000000, 0x00000000, 0x0006870c, 0x0006870c, 0x00000000 },
6584 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 }, 6669 { 0x0000aa80, 0x00000000, 0x00000000, 0x00068780, 0x00068780, 0x00000000 },
6585 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 }, 6670 { 0x0000aa84, 0x00000000, 0x00000000, 0x00068784, 0x00068784, 0x00000000 },
6586 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 }, 6671 { 0x0000aa88, 0x00000000, 0x00000000, 0x00078b00, 0x00078b00, 0x00000000 },
6587 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 6672 { 0x0000aa8c, 0x00000000, 0x00000000, 0x00078b04, 0x00078b04, 0x00000000 },
6588 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 }, 6673 { 0x0000aa90, 0x00000000, 0x00000000, 0x00078b08, 0x00078b08, 0x00000000 },
6589 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 }, 6674 { 0x0000aa94, 0x00000000, 0x00000000, 0x00078b0c, 0x00078b0c, 0x00000000 },
6590 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 }, 6675 { 0x0000aa98, 0x00000000, 0x00000000, 0x00078b80, 0x00078b80, 0x00000000 },
@@ -6683,25 +6768,6 @@ static const u_int32_t ar9271Modes_9271[][6] = {
6683 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 6768 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
6684 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 6769 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
6685 { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 }, 6770 { 0x0000a250, 0x0004f000, 0x0004f000, 0x0004a000, 0x0004a000, 0x0004a000 },
6686 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
6687 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
6688 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
6689 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
6690 { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
6691 { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
6692 { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
6693 { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
6694 { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
6695 { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
6696 { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
6697 { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
6698 { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
6699 { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
6700 { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
6701 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
6702 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
6703 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
6704 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
6705 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 6771 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
6706}; 6772};
6707 6773
@@ -6879,7 +6945,7 @@ static const u_int32_t ar9271Common_9271[][2] = {
6879 { 0x00008258, 0x00000000 }, 6945 { 0x00008258, 0x00000000 },
6880 { 0x0000825c, 0x400000ff }, 6946 { 0x0000825c, 0x400000ff },
6881 { 0x00008260, 0x00080922 }, 6947 { 0x00008260, 0x00080922 },
6882 { 0x00008264, 0x88a00010 }, 6948 { 0x00008264, 0xa8a00010 },
6883 { 0x00008270, 0x00000000 }, 6949 { 0x00008270, 0x00000000 },
6884 { 0x00008274, 0x40000000 }, 6950 { 0x00008274, 0x40000000 },
6885 { 0x00008278, 0x003e4180 }, 6951 { 0x00008278, 0x003e4180 },
@@ -6910,13 +6976,10 @@ static const u_int32_t ar9271Common_9271[][2] = {
6910 { 0x00007810, 0x71c0d388 }, 6976 { 0x00007810, 0x71c0d388 },
6911 { 0x00007814, 0x924934a8 }, 6977 { 0x00007814, 0x924934a8 },
6912 { 0x0000781c, 0x00000000 }, 6978 { 0x0000781c, 0x00000000 },
6913 { 0x00007820, 0x00000c04 },
6914 { 0x00007824, 0x00d8abff },
6915 { 0x00007828, 0x66964300 }, 6979 { 0x00007828, 0x66964300 },
6916 { 0x0000782c, 0x8db6d961 }, 6980 { 0x0000782c, 0x8db6d961 },
6917 { 0x00007830, 0x8db6d96c }, 6981 { 0x00007830, 0x8db6d96c },
6918 { 0x00007834, 0x6140008b }, 6982 { 0x00007834, 0x6140008b },
6919 { 0x00007838, 0x00000029 },
6920 { 0x0000783c, 0x72ee0a72 }, 6983 { 0x0000783c, 0x72ee0a72 },
6921 { 0x00007840, 0xbbfffffc }, 6984 { 0x00007840, 0xbbfffffc },
6922 { 0x00007844, 0x000c0db6 }, 6985 { 0x00007844, 0x000c0db6 },
@@ -6929,7 +6992,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
6929 { 0x00007860, 0x21084210 }, 6992 { 0x00007860, 0x21084210 },
6930 { 0x00007864, 0xf7d7ffde }, 6993 { 0x00007864, 0xf7d7ffde },
6931 { 0x00007868, 0xc2034080 }, 6994 { 0x00007868, 0xc2034080 },
6932 { 0x0000786c, 0x48609eb4 },
6933 { 0x00007870, 0x10142c00 }, 6995 { 0x00007870, 0x10142c00 },
6934 { 0x00009808, 0x00000000 }, 6996 { 0x00009808, 0x00000000 },
6935 { 0x0000980c, 0xafe68e30 }, 6997 { 0x0000980c, 0xafe68e30 },
@@ -6982,9 +7044,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
6982 { 0x000099e8, 0x3c466478 }, 7044 { 0x000099e8, 0x3c466478 },
6983 { 0x000099ec, 0x0cc80caa }, 7045 { 0x000099ec, 0x0cc80caa },
6984 { 0x000099f0, 0x00000000 }, 7046 { 0x000099f0, 0x00000000 },
6985 { 0x0000a1f4, 0x00000000 },
6986 { 0x0000a1f8, 0x71733d01 },
6987 { 0x0000a1fc, 0xd0ad5c12 },
6988 { 0x0000a208, 0x803e68c8 }, 7047 { 0x0000a208, 0x803e68c8 },
6989 { 0x0000a210, 0x4080a333 }, 7048 { 0x0000a210, 0x4080a333 },
6990 { 0x0000a214, 0x00206c10 }, 7049 { 0x0000a214, 0x00206c10 },
@@ -7004,13 +7063,9 @@ static const u_int32_t ar9271Common_9271[][2] = {
7004 { 0x0000a260, 0xdfa90f01 }, 7063 { 0x0000a260, 0xdfa90f01 },
7005 { 0x0000a268, 0x00000000 }, 7064 { 0x0000a268, 0x00000000 },
7006 { 0x0000a26c, 0x0ebae9e6 }, 7065 { 0x0000a26c, 0x0ebae9e6 },
7007 { 0x0000a278, 0x3bdef7bd },
7008 { 0x0000a27c, 0x050e83bd },
7009 { 0x0000a388, 0x0c000000 }, 7066 { 0x0000a388, 0x0c000000 },
7010 { 0x0000a38c, 0x20202020 }, 7067 { 0x0000a38c, 0x20202020 },
7011 { 0x0000a390, 0x20202020 }, 7068 { 0x0000a390, 0x20202020 },
7012 { 0x0000a394, 0x3bdef7bd },
7013 { 0x0000a398, 0x000003bd },
7014 { 0x0000a39c, 0x00000001 }, 7069 { 0x0000a39c, 0x00000001 },
7015 { 0x0000a3a0, 0x00000000 }, 7070 { 0x0000a3a0, 0x00000000 },
7016 { 0x0000a3a4, 0x00000000 }, 7071 { 0x0000a3a4, 0x00000000 },
@@ -7025,8 +7080,6 @@ static const u_int32_t ar9271Common_9271[][2] = {
7025 { 0x0000a3cc, 0x20202020 }, 7080 { 0x0000a3cc, 0x20202020 },
7026 { 0x0000a3d0, 0x20202020 }, 7081 { 0x0000a3d0, 0x20202020 },
7027 { 0x0000a3d4, 0x20202020 }, 7082 { 0x0000a3d4, 0x20202020 },
7028 { 0x0000a3dc, 0x3bdef7bd },
7029 { 0x0000a3e0, 0x000003bd },
7030 { 0x0000a3e4, 0x00000000 }, 7083 { 0x0000a3e4, 0x00000000 },
7031 { 0x0000a3e8, 0x18c43433 }, 7084 { 0x0000a3e8, 0x18c43433 },
7032 { 0x0000a3ec, 0x00f70081 }, 7085 { 0x0000a3ec, 0x00f70081 },
@@ -7046,7 +7099,102 @@ static const u_int32_t ar9271Common_9271[][2] = {
7046 { 0x0000d384, 0xf3307ff0 }, 7099 { 0x0000d384, 0xf3307ff0 },
7047}; 7100};
7048 7101
7102static const u_int32_t ar9271Common_normal_cck_fir_coeff_9271[][2] = {
7103 { 0x0000a1f4, 0x00fffeff },
7104 { 0x0000a1f8, 0x00f5f9ff },
7105 { 0x0000a1fc, 0xb79f6427 },
7106};
7107
7108static const u_int32_t ar9271Common_japan_2484_cck_fir_coeff_9271[][2] = {
7109 { 0x0000a1f4, 0x00000000 },
7110 { 0x0000a1f8, 0xefff0301 },
7111 { 0x0000a1fc, 0xca9228ee },
7112};
7113
7049static const u_int32_t ar9271Modes_9271_1_0_only[][6] = { 7114static const u_int32_t ar9271Modes_9271_1_0_only[][6] = {
7050 { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 }, 7115 { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 },
7051 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 7116 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
7052}; 7117};
7118
7119static const u_int32_t ar9271Modes_9271_ANI_reg[][6] = {
7120 { 0x00009850, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2, 0x6d4000e2 },
7121 { 0x0000985c, 0x3139605e, 0x3139605e, 0x3137605e, 0x3137605e, 0x3139605e },
7122 { 0x00009858, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
7123 { 0x0000986c, 0x06903881, 0x06903881, 0x06903881, 0x06903881, 0x06903881 },
7124 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
7125 { 0x0000a208, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8, 0x803e68c8 },
7126 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
7127 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
7128};
7129
7130static const u_int32_t ar9271Modes_normal_power_tx_gain_9271[][6] = {
7131 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
7132 { 0x0000a304, 0x00000000, 0x00000000, 0x00009200, 0x00009200, 0x00000000 },
7133 { 0x0000a308, 0x00000000, 0x00000000, 0x00010208, 0x00010208, 0x00000000 },
7134 { 0x0000a30c, 0x00000000, 0x00000000, 0x00019608, 0x00019608, 0x00000000 },
7135 { 0x0000a310, 0x00000000, 0x00000000, 0x0001e610, 0x0001e610, 0x00000000 },
7136 { 0x0000a314, 0x00000000, 0x00000000, 0x0002d6d0, 0x0002d6d0, 0x00000000 },
7137 { 0x0000a318, 0x00000000, 0x00000000, 0x00039758, 0x00039758, 0x00000000 },
7138 { 0x0000a31c, 0x00000000, 0x00000000, 0x0003b759, 0x0003b759, 0x00000000 },
7139 { 0x0000a320, 0x00000000, 0x00000000, 0x0003d75a, 0x0003d75a, 0x00000000 },
7140 { 0x0000a324, 0x00000000, 0x00000000, 0x0004175c, 0x0004175c, 0x00000000 },
7141 { 0x0000a328, 0x00000000, 0x00000000, 0x0004575e, 0x0004575e, 0x00000000 },
7142 { 0x0000a32c, 0x00000000, 0x00000000, 0x0004979f, 0x0004979f, 0x00000000 },
7143 { 0x0000a330, 0x00000000, 0x00000000, 0x0004d7df, 0x0004d7df, 0x00000000 },
7144 { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
7145 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
7146 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
7147 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7148 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7149 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7150 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7151 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7152 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7153 { 0x00007838, 0x00000029, 0x00000029, 0x00000029, 0x00000029, 0x00000029 },
7154 { 0x00007824, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff, 0x00d8abff },
7155 { 0x0000786c, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4, 0x48609eb4 },
7156 { 0x00007820, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04, 0x00000c04 },
7157 { 0x0000a274, 0x0a21c652, 0x0a21c652, 0x0a218652, 0x0a218652, 0x0a22a652 },
7158 { 0x0000a278, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
7159 { 0x0000a27c, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd, 0x050e83bd },
7160 { 0x0000a394, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
7161 { 0x0000a398, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
7162 { 0x0000a3dc, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd, 0x3bdef7bd },
7163 { 0x0000a3e0, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd, 0x000003bd },
7164};
7165
7166static const u_int32_t ar9271Modes_high_power_tx_gain_9271[][6] = {
7167 { 0x0000a300, 0x00000000, 0x00000000, 0x00010000, 0x00010000, 0x00000000 },
7168 { 0x0000a304, 0x00000000, 0x00000000, 0x00016200, 0x00016200, 0x00000000 },
7169 { 0x0000a308, 0x00000000, 0x00000000, 0x00018201, 0x00018201, 0x00000000 },
7170 { 0x0000a30c, 0x00000000, 0x00000000, 0x0001b240, 0x0001b240, 0x00000000 },
7171 { 0x0000a310, 0x00000000, 0x00000000, 0x0001d241, 0x0001d241, 0x00000000 },
7172 { 0x0000a314, 0x00000000, 0x00000000, 0x0001f600, 0x0001f600, 0x00000000 },
7173 { 0x0000a318, 0x00000000, 0x00000000, 0x00022800, 0x00022800, 0x00000000 },
7174 { 0x0000a31c, 0x00000000, 0x00000000, 0x00026802, 0x00026802, 0x00000000 },
7175 { 0x0000a320, 0x00000000, 0x00000000, 0x0002b805, 0x0002b805, 0x00000000 },
7176 { 0x0000a324, 0x00000000, 0x00000000, 0x0002ea41, 0x0002ea41, 0x00000000 },
7177 { 0x0000a328, 0x00000000, 0x00000000, 0x00038b00, 0x00038b00, 0x00000000 },
7178 { 0x0000a32c, 0x00000000, 0x00000000, 0x0003ab40, 0x0003ab40, 0x00000000 },
7179 { 0x0000a330, 0x00000000, 0x00000000, 0x0003cd80, 0x0003cd80, 0x00000000 },
7180 { 0x0000a334, 0x000368de, 0x000368de, 0x000368de, 0x000368de, 0x00000000 },
7181 { 0x0000a338, 0x0003891e, 0x0003891e, 0x0003891e, 0x0003891e, 0x00000000 },
7182 { 0x0000a33c, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x0003a95e, 0x00000000 },
7183 { 0x0000a340, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7184 { 0x0000a344, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7185 { 0x0000a348, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7186 { 0x0000a34c, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7187 { 0x0000a350, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7188 { 0x0000a354, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x0003e9df, 0x00000000 },
7189 { 0x00007838, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b, 0x0000002b },
7190 { 0x00007824, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff, 0x00d8a7ff },
7191 { 0x0000786c, 0x08609eb6, 0x08609eb6, 0x08609eba, 0x08609eba, 0x08609eb6 },
7192 { 0x00007820, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00, 0x00000c00 },
7193 { 0x0000a274, 0x0a22a652, 0x0a22a652, 0x0a212652, 0x0a212652, 0x0a22a652 },
7194 { 0x0000a278, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7, 0x0e739ce7 },
7195 { 0x0000a27c, 0x05018063, 0x05038063, 0x05018063, 0x05018063, 0x05018063 },
7196 { 0x0000a394, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
7197 { 0x0000a398, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
7198 { 0x0000a3dc, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63, 0x06318c63 },
7199 { 0x0000a3e0, 0x00000063, 0x00000063, 0x00000063, 0x00000063, 0x00000063 },
7200};
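The tables above follow the usual ath9k initvals layout: column 0 holds the register offset and columns 1..5 hold per-mode values. A minimal sketch of how such a table is flushed to the chip, assuming a hypothetical write_reg() callback in place of the driver's real register accessor:

/* Sketch only: program one mode column of a [][6] initvals table.
 * Column 0 is the register address; 'column' (1..5) selects the
 * operating mode. write_reg() is a stand-in, not a real API. */
static void write_initvals(const u_int32_t table[][6], unsigned int rows,
                           unsigned int column,
                           void (*write_reg)(u_int32_t addr, u_int32_t val))
{
        unsigned int i;

        for (i = 0; i < rows; i++)
                write_reg(table[i][0], table[i][column]);
}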
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index efc420cd42bf..4a2060e5a777 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -31,8 +31,10 @@ static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
31 REG_WRITE(ah, AR_IMR_S1, 31 REG_WRITE(ah, AR_IMR_S1,
32 SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR) 32 SM(ah->txerr_interrupt_mask, AR_IMR_S1_QCU_TXERR)
33 | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL)); 33 | SM(ah->txeol_interrupt_mask, AR_IMR_S1_QCU_TXEOL));
34 REG_RMW_FIELD(ah, AR_IMR_S2, 34
35 AR_IMR_S2_QCU_TXURN, ah->txurn_interrupt_mask); 35 ah->imrs2_reg &= ~AR_IMR_S2_QCU_TXURN;
36 ah->imrs2_reg |= (ah->txurn_interrupt_mask & AR_IMR_S2_QCU_TXURN);
37 REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
36} 38}
37 39
38u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) 40u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
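The first hunk above replaces a REG_RMW_FIELD() — a register read followed by a write — with a shadow copy kept in ah->imrs2_reg, so updating the TXURN bits costs a single register write. Avoiding the read matters on buses where register reads are expensive; the USB-attached HTC targets added elsewhere in this series are a plausible motivation (an assumption, the patch does not say). The pattern in isolation:

/* Shadow-register sketch (illustrative names, not driver code):
 * cache the register value so a field update never needs a
 * read-modify-write across the bus. */
struct shadow_example {
        unsigned int imrs2_shadow;      /* mirrors AR_IMR_S2 */
};

static void update_txurn_bits(struct shadow_example *dev,
                              unsigned int mask, unsigned int field,
                              void (*reg_write)(unsigned int val))
{
        dev->imrs2_shadow &= ~field;            /* clear the field */
        dev->imrs2_shadow |= (mask & field);    /* merge new bits */
        reg_write(dev->imrs2_shadow);           /* one bus write */
}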
@@ -103,7 +105,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
103 if (ah->tx_trig_level >= ah->config.max_txtrig_level) 105 if (ah->tx_trig_level >= ah->config.max_txtrig_level)
104 return false; 106 return false;
105 107
106 omask = ath9k_hw_set_interrupts(ah, ah->mask_reg & ~ATH9K_INT_GLOBAL); 108 omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL);
107 109
108 txcfg = REG_READ(ah, AR_TXCFG); 110 txcfg = REG_READ(ah, AR_TXCFG);
109 curLevel = MS(txcfg, AR_FTRIG); 111 curLevel = MS(txcfg, AR_FTRIG);
@@ -244,79 +246,80 @@ void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
244} 246}
245EXPORT_SYMBOL(ath9k_hw_cleartxdesc); 247EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
246 248
247int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds) 249int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds,
250 struct ath_tx_status *ts)
248{ 251{
249 struct ar5416_desc *ads = AR5416DESC(ds); 252 struct ar5416_desc *ads = AR5416DESC(ds);
250 253
251 if ((ads->ds_txstatus9 & AR_TxDone) == 0) 254 if ((ads->ds_txstatus9 & AR_TxDone) == 0)
252 return -EINPROGRESS; 255 return -EINPROGRESS;
253 256
254 ds->ds_txstat.ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum); 257 ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
255 ds->ds_txstat.ts_tstamp = ads->AR_SendTimestamp; 258 ts->ts_tstamp = ads->AR_SendTimestamp;
256 ds->ds_txstat.ts_status = 0; 259 ts->ts_status = 0;
257 ds->ds_txstat.ts_flags = 0; 260 ts->ts_flags = 0;
258 261
259 if (ads->ds_txstatus1 & AR_FrmXmitOK) 262 if (ads->ds_txstatus1 & AR_FrmXmitOK)
260 ds->ds_txstat.ts_status |= ATH9K_TX_ACKED; 263 ts->ts_status |= ATH9K_TX_ACKED;
261 if (ads->ds_txstatus1 & AR_ExcessiveRetries) 264 if (ads->ds_txstatus1 & AR_ExcessiveRetries)
262 ds->ds_txstat.ts_status |= ATH9K_TXERR_XRETRY; 265 ts->ts_status |= ATH9K_TXERR_XRETRY;
263 if (ads->ds_txstatus1 & AR_Filtered) 266 if (ads->ds_txstatus1 & AR_Filtered)
264 ds->ds_txstat.ts_status |= ATH9K_TXERR_FILT; 267 ts->ts_status |= ATH9K_TXERR_FILT;
265 if (ads->ds_txstatus1 & AR_FIFOUnderrun) { 268 if (ads->ds_txstatus1 & AR_FIFOUnderrun) {
266 ds->ds_txstat.ts_status |= ATH9K_TXERR_FIFO; 269 ts->ts_status |= ATH9K_TXERR_FIFO;
267 ath9k_hw_updatetxtriglevel(ah, true); 270 ath9k_hw_updatetxtriglevel(ah, true);
268 } 271 }
269 if (ads->ds_txstatus9 & AR_TxOpExceeded) 272 if (ads->ds_txstatus9 & AR_TxOpExceeded)
270 ds->ds_txstat.ts_status |= ATH9K_TXERR_XTXOP; 273 ts->ts_status |= ATH9K_TXERR_XTXOP;
271 if (ads->ds_txstatus1 & AR_TxTimerExpired) 274 if (ads->ds_txstatus1 & AR_TxTimerExpired)
272 ds->ds_txstat.ts_status |= ATH9K_TXERR_TIMER_EXPIRED; 275 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
273 276
274 if (ads->ds_txstatus1 & AR_DescCfgErr) 277 if (ads->ds_txstatus1 & AR_DescCfgErr)
275 ds->ds_txstat.ts_flags |= ATH9K_TX_DESC_CFG_ERR; 278 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
276 if (ads->ds_txstatus1 & AR_TxDataUnderrun) { 279 if (ads->ds_txstatus1 & AR_TxDataUnderrun) {
277 ds->ds_txstat.ts_flags |= ATH9K_TX_DATA_UNDERRUN; 280 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
278 ath9k_hw_updatetxtriglevel(ah, true); 281 ath9k_hw_updatetxtriglevel(ah, true);
279 } 282 }
280 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) { 283 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) {
281 ds->ds_txstat.ts_flags |= ATH9K_TX_DELIM_UNDERRUN; 284 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
282 ath9k_hw_updatetxtriglevel(ah, true); 285 ath9k_hw_updatetxtriglevel(ah, true);
283 } 286 }
284 if (ads->ds_txstatus0 & AR_TxBaStatus) { 287 if (ads->ds_txstatus0 & AR_TxBaStatus) {
285 ds->ds_txstat.ts_flags |= ATH9K_TX_BA; 288 ts->ts_flags |= ATH9K_TX_BA;
286 ds->ds_txstat.ba_low = ads->AR_BaBitmapLow; 289 ts->ba_low = ads->AR_BaBitmapLow;
287 ds->ds_txstat.ba_high = ads->AR_BaBitmapHigh; 290 ts->ba_high = ads->AR_BaBitmapHigh;
288 } 291 }
289 292
290 ds->ds_txstat.ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx); 293 ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx);
291 switch (ds->ds_txstat.ts_rateindex) { 294 switch (ts->ts_rateindex) {
292 case 0: 295 case 0:
293 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0); 296 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0);
294 break; 297 break;
295 case 1: 298 case 1:
296 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1); 299 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
297 break; 300 break;
298 case 2: 301 case 2:
299 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2); 302 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
300 break; 303 break;
301 case 3: 304 case 3:
302 ds->ds_txstat.ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3); 305 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
303 break; 306 break;
304 } 307 }
305 308
306 ds->ds_txstat.ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined); 309 ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
307 ds->ds_txstat.ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00); 310 ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
308 ds->ds_txstat.ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01); 311 ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
309 ds->ds_txstat.ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02); 312 ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
310 ds->ds_txstat.ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10); 313 ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
311 ds->ds_txstat.ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11); 314 ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
312 ds->ds_txstat.ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12); 315 ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
313 ds->ds_txstat.evm0 = ads->AR_TxEVM0; 316 ts->evm0 = ads->AR_TxEVM0;
314 ds->ds_txstat.evm1 = ads->AR_TxEVM1; 317 ts->evm1 = ads->AR_TxEVM1;
315 ds->ds_txstat.evm2 = ads->AR_TxEVM2; 318 ts->evm2 = ads->AR_TxEVM2;
316 ds->ds_txstat.ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt); 319 ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
317 ds->ds_txstat.ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt); 320 ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
318 ds->ds_txstat.ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt); 321 ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
319 ds->ds_txstat.ts_antenna = 0; 322 ts->ts_antenna = 0;
320 323
321 return 0; 324 return 0;
322} 325}
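After this change, TX completion status is returned through a caller-supplied struct ath_tx_status rather than written into the DMA-mapped descriptor. A sketch of the new calling convention, assuming ah and bf come from the surrounding TX-completion path:

struct ath_tx_status ts;

memset(&ts, 0, sizeof(ts));
if (ath9k_hw_txprocdesc(ah, bf->bf_desc, &ts) == -EINPROGRESS)
        return;         /* hardware has not finished this frame yet */
/* ts.ts_status, ts.ts_rateindex etc. are now valid */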
@@ -349,7 +352,7 @@ void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
349 352
350 ads->ds_ctl6 = SM(keyType, AR_EncrType); 353 ads->ds_ctl6 = SM(keyType, AR_EncrType);
351 354
352 if (AR_SREV_9285(ah)) { 355 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) {
353 ads->ds_ctl8 = 0; 356 ads->ds_ctl8 = 0;
354 ads->ds_ctl9 = 0; 357 ads->ds_ctl9 = 0;
355 ads->ds_ctl10 = 0; 358 ads->ds_ctl10 = 0;
@@ -856,7 +859,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
856EXPORT_SYMBOL(ath9k_hw_resettxqueue); 859EXPORT_SYMBOL(ath9k_hw_resettxqueue);
857 860
858int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, 861int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
859 u32 pa, struct ath_desc *nds, u64 tsf) 862 struct ath_rx_status *rs, u64 tsf)
860{ 863{
861 struct ar5416_desc ads; 864 struct ar5416_desc ads;
862 struct ar5416_desc *adsp = AR5416DESC(ds); 865 struct ar5416_desc *adsp = AR5416DESC(ds);
@@ -867,70 +870,70 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
867 870
868 ads.u.rx = adsp->u.rx; 871 ads.u.rx = adsp->u.rx;
869 872
870 ds->ds_rxstat.rs_status = 0; 873 rs->rs_status = 0;
871 ds->ds_rxstat.rs_flags = 0; 874 rs->rs_flags = 0;
872 875
873 ds->ds_rxstat.rs_datalen = ads.ds_rxstatus1 & AR_DataLen; 876 rs->rs_datalen = ads.ds_rxstatus1 & AR_DataLen;
874 ds->ds_rxstat.rs_tstamp = ads.AR_RcvTimestamp; 877 rs->rs_tstamp = ads.AR_RcvTimestamp;
875 878
876 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) { 879 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) {
877 ds->ds_rxstat.rs_rssi = ATH9K_RSSI_BAD; 880 rs->rs_rssi = ATH9K_RSSI_BAD;
878 ds->ds_rxstat.rs_rssi_ctl0 = ATH9K_RSSI_BAD; 881 rs->rs_rssi_ctl0 = ATH9K_RSSI_BAD;
879 ds->ds_rxstat.rs_rssi_ctl1 = ATH9K_RSSI_BAD; 882 rs->rs_rssi_ctl1 = ATH9K_RSSI_BAD;
880 ds->ds_rxstat.rs_rssi_ctl2 = ATH9K_RSSI_BAD; 883 rs->rs_rssi_ctl2 = ATH9K_RSSI_BAD;
881 ds->ds_rxstat.rs_rssi_ext0 = ATH9K_RSSI_BAD; 884 rs->rs_rssi_ext0 = ATH9K_RSSI_BAD;
882 ds->ds_rxstat.rs_rssi_ext1 = ATH9K_RSSI_BAD; 885 rs->rs_rssi_ext1 = ATH9K_RSSI_BAD;
883 ds->ds_rxstat.rs_rssi_ext2 = ATH9K_RSSI_BAD; 886 rs->rs_rssi_ext2 = ATH9K_RSSI_BAD;
884 } else { 887 } else {
885 ds->ds_rxstat.rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined); 888 rs->rs_rssi = MS(ads.ds_rxstatus4, AR_RxRSSICombined);
886 ds->ds_rxstat.rs_rssi_ctl0 = MS(ads.ds_rxstatus0, 889 rs->rs_rssi_ctl0 = MS(ads.ds_rxstatus0,
887 AR_RxRSSIAnt00); 890 AR_RxRSSIAnt00);
888 ds->ds_rxstat.rs_rssi_ctl1 = MS(ads.ds_rxstatus0, 891 rs->rs_rssi_ctl1 = MS(ads.ds_rxstatus0,
889 AR_RxRSSIAnt01); 892 AR_RxRSSIAnt01);
890 ds->ds_rxstat.rs_rssi_ctl2 = MS(ads.ds_rxstatus0, 893 rs->rs_rssi_ctl2 = MS(ads.ds_rxstatus0,
891 AR_RxRSSIAnt02); 894 AR_RxRSSIAnt02);
892 ds->ds_rxstat.rs_rssi_ext0 = MS(ads.ds_rxstatus4, 895 rs->rs_rssi_ext0 = MS(ads.ds_rxstatus4,
893 AR_RxRSSIAnt10); 896 AR_RxRSSIAnt10);
894 ds->ds_rxstat.rs_rssi_ext1 = MS(ads.ds_rxstatus4, 897 rs->rs_rssi_ext1 = MS(ads.ds_rxstatus4,
895 AR_RxRSSIAnt11); 898 AR_RxRSSIAnt11);
896 ds->ds_rxstat.rs_rssi_ext2 = MS(ads.ds_rxstatus4, 899 rs->rs_rssi_ext2 = MS(ads.ds_rxstatus4,
897 AR_RxRSSIAnt12); 900 AR_RxRSSIAnt12);
898 } 901 }
899 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid) 902 if (ads.ds_rxstatus8 & AR_RxKeyIdxValid)
900 ds->ds_rxstat.rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx); 903 rs->rs_keyix = MS(ads.ds_rxstatus8, AR_KeyIdx);
901 else 904 else
902 ds->ds_rxstat.rs_keyix = ATH9K_RXKEYIX_INVALID; 905 rs->rs_keyix = ATH9K_RXKEYIX_INVALID;
903 906
904 ds->ds_rxstat.rs_rate = RXSTATUS_RATE(ah, (&ads)); 907 rs->rs_rate = RXSTATUS_RATE(ah, (&ads));
905 ds->ds_rxstat.rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0; 908 rs->rs_more = (ads.ds_rxstatus1 & AR_RxMore) ? 1 : 0;
906 909
907 ds->ds_rxstat.rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0; 910 rs->rs_isaggr = (ads.ds_rxstatus8 & AR_RxAggr) ? 1 : 0;
908 ds->ds_rxstat.rs_moreaggr = 911 rs->rs_moreaggr =
909 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0; 912 (ads.ds_rxstatus8 & AR_RxMoreAggr) ? 1 : 0;
910 ds->ds_rxstat.rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna); 913 rs->rs_antenna = MS(ads.ds_rxstatus3, AR_RxAntenna);
911 ds->ds_rxstat.rs_flags = 914 rs->rs_flags =
912 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0; 915 (ads.ds_rxstatus3 & AR_GI) ? ATH9K_RX_GI : 0;
913 ds->ds_rxstat.rs_flags |= 916 rs->rs_flags |=
914 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0; 917 (ads.ds_rxstatus3 & AR_2040) ? ATH9K_RX_2040 : 0;
915 918
916 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr) 919 if (ads.ds_rxstatus8 & AR_PreDelimCRCErr)
917 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_PRE; 920 rs->rs_flags |= ATH9K_RX_DELIM_CRC_PRE;
918 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr) 921 if (ads.ds_rxstatus8 & AR_PostDelimCRCErr)
919 ds->ds_rxstat.rs_flags |= ATH9K_RX_DELIM_CRC_POST; 922 rs->rs_flags |= ATH9K_RX_DELIM_CRC_POST;
920 if (ads.ds_rxstatus8 & AR_DecryptBusyErr) 923 if (ads.ds_rxstatus8 & AR_DecryptBusyErr)
921 ds->ds_rxstat.rs_flags |= ATH9K_RX_DECRYPT_BUSY; 924 rs->rs_flags |= ATH9K_RX_DECRYPT_BUSY;
922 925
923 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { 926 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
924 if (ads.ds_rxstatus8 & AR_CRCErr) 927 if (ads.ds_rxstatus8 & AR_CRCErr)
925 ds->ds_rxstat.rs_status |= ATH9K_RXERR_CRC; 928 rs->rs_status |= ATH9K_RXERR_CRC;
926 else if (ads.ds_rxstatus8 & AR_PHYErr) { 929 else if (ads.ds_rxstatus8 & AR_PHYErr) {
927 ds->ds_rxstat.rs_status |= ATH9K_RXERR_PHY; 930 rs->rs_status |= ATH9K_RXERR_PHY;
928 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); 931 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
929 ds->ds_rxstat.rs_phyerr = phyerr; 932 rs->rs_phyerr = phyerr;
930 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) 933 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
931 ds->ds_rxstat.rs_status |= ATH9K_RXERR_DECRYPT; 934 rs->rs_status |= ATH9K_RXERR_DECRYPT;
932 else if (ads.ds_rxstatus8 & AR_MichaelErr) 935 else if (ads.ds_rxstatus8 & AR_MichaelErr)
933 ds->ds_rxstat.rs_status |= ATH9K_RXERR_MIC; 936 rs->rs_status |= ATH9K_RXERR_MIC;
934 } 937 }
935 938
936 return 0; 939 return 0;
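RX takes the same shape: the pa and nds arguments disappear and status lands in an on-stack struct ath_rx_status. A sketch mirroring the real call site in the recv.c hunk later in this patch:

struct ath_rx_status rs;

memset(&rs, 0, sizeof(rs));
if (ath9k_hw_rxprocdesc(ah, ds, &rs, 0) == -EINPROGRESS)
        return;                         /* descriptor still owned by hardware */
skb_put(skb, rs.rs_datalen);            /* frame length from the status */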
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 29851e6376a9..68dbd7a8ddca 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -150,6 +150,32 @@ struct ath_rx_status {
150 u32 evm2; 150 u32 evm2;
151}; 151};
152 152
153struct ath_htc_rx_status {
154 u64 rs_tstamp;
155 u16 rs_datalen;
156 u8 rs_status;
157 u8 rs_phyerr;
158 int8_t rs_rssi;
159 int8_t rs_rssi_ctl0;
160 int8_t rs_rssi_ctl1;
161 int8_t rs_rssi_ctl2;
162 int8_t rs_rssi_ext0;
163 int8_t rs_rssi_ext1;
164 int8_t rs_rssi_ext2;
165 u8 rs_keyix;
166 u8 rs_rate;
167 u8 rs_antenna;
168 u8 rs_more;
169 u8 rs_isaggr;
170 u8 rs_moreaggr;
171 u8 rs_num_delims;
172 u8 rs_flags;
173 u8 rs_dummy;
174 u32 evm0;
175 u32 evm1;
176 u32 evm2;
177};
178
153#define ATH9K_RXERR_CRC 0x01 179#define ATH9K_RXERR_CRC 0x01
154#define ATH9K_RXERR_PHY 0x02 180#define ATH9K_RXERR_PHY 0x02
155#define ATH9K_RXERR_FIFO 0x04 181#define ATH9K_RXERR_FIFO 0x04
@@ -207,18 +233,9 @@ struct ath_desc {
207 u32 ds_ctl0; 233 u32 ds_ctl0;
208 u32 ds_ctl1; 234 u32 ds_ctl1;
209 u32 ds_hw[20]; 235 u32 ds_hw[20];
210 union {
211 struct ath_tx_status tx;
212 struct ath_rx_status rx;
213 void *stats;
214 } ds_us;
215 void *ds_vdata; 236 void *ds_vdata;
216} __packed; 237} __packed;
217 238
218#define ds_txstat ds_us.tx
219#define ds_rxstat ds_us.rx
220#define ds_stat ds_us.stats
221
222#define ATH9K_TXDESC_CLRDMASK 0x0001 239#define ATH9K_TXDESC_CLRDMASK 0x0001
223#define ATH9K_TXDESC_NOACK 0x0002 240#define ATH9K_TXDESC_NOACK 0x0002
224#define ATH9K_TXDESC_RTSENA 0x0004 241#define ATH9K_TXDESC_RTSENA 0x0004
@@ -676,7 +693,8 @@ void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
676 u32 segLen, bool firstSeg, 693 u32 segLen, bool firstSeg,
677 bool lastSeg, const struct ath_desc *ds0); 694 bool lastSeg, const struct ath_desc *ds0);
678void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds); 695void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds);
679int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds); 696int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds,
697 struct ath_tx_status *ts);
680void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds, 698void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
681 u32 pktLen, enum ath9k_pkt_type type, u32 txPower, 699 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
682 u32 keyIx, enum ath9k_key_type keyType, u32 flags); 700 u32 keyIx, enum ath9k_key_type keyType, u32 flags);
@@ -706,7 +724,7 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
706bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q); 724bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q);
707bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q); 725bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q);
708int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, 726int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
709 u32 pa, struct ath_desc *nds, u64 tsf); 727 struct ath_rx_status *rs, u64 tsf);
710void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds, 728void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
711 u32 size, u32 flags); 729 u32 size, u32 flags);
712bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set); 730bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
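The net effect on the descriptor layout: struct ath_desc no longer embeds the status union, so every DMA-mapped descriptor shrinks by the size of the larger status structure. Condensed before/after, paraphrasing the hunk above with hypothetical names (earlier members of the struct are omitted here):

/* Before: a status union rode along in every DMA descriptor. */
struct ath_desc_before {
        u32 ds_ctl0;
        u32 ds_ctl1;
        u32 ds_hw[20];
        union {
                struct ath_tx_status tx;
                struct ath_rx_status rx;
                void *stats;
        } ds_us;
        void *ds_vdata;
} __packed;

/* After: only what the hardware touches plus one driver pointer;
 * completion status now lives on the caller's stack. */
struct ath_desc_after {
        u32 ds_ctl0;
        u32 ds_ctl1;
        u32 ds_hw[20];
        void *ds_vdata;
} __packed;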
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 115e1aeedb59..f7ef11407e27 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -225,7 +225,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
225 225
226 ath_cache_conf_rate(sc, &hw->conf); 226 ath_cache_conf_rate(sc, &hw->conf);
227 ath_update_txpow(sc); 227 ath_update_txpow(sc);
228 ath9k_hw_set_interrupts(ah, sc->imask); 228 ath9k_hw_set_interrupts(ah, ah->imask);
229 229
230 ps_restore: 230 ps_restore:
231 ath9k_ps_restore(sc); 231 ath9k_ps_restore(sc);
@@ -434,7 +434,7 @@ void ath9k_tasklet(unsigned long data)
434 ath_gen_timer_isr(sc->sc_ah); 434 ath_gen_timer_isr(sc->sc_ah);
435 435
436 /* re-enable hardware interrupt */ 436 /* re-enable hardware interrupt */
437 ath9k_hw_set_interrupts(ah, sc->imask); 437 ath9k_hw_set_interrupts(ah, ah->imask);
438 ath9k_ps_restore(sc); 438 ath9k_ps_restore(sc);
439} 439}
440 440
@@ -477,7 +477,7 @@ irqreturn_t ath_isr(int irq, void *dev)
477 * value to ensure we only process bits we requested. 477
478 */ 478 */
479 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */ 479 ath9k_hw_getisr(ah, &status); /* NB: clears ISR too */
480 status &= sc->imask; /* discard unasked-for bits */ 480 status &= ah->imask; /* discard unasked-for bits */
481 481
482 /* 482 /*
483 * If there are no status bits set, then this interrupt was not 483 * If there are no status bits set, then this interrupt was not
@@ -518,7 +518,7 @@ irqreturn_t ath_isr(int irq, void *dev)
518 * the interrupt. 518 * the interrupt.
519 */ 519 */
520 ath9k_hw_procmibevent(ah); 520 ath9k_hw_procmibevent(ah);
521 ath9k_hw_set_interrupts(ah, sc->imask); 521 ath9k_hw_set_interrupts(ah, ah->imask);
522 } 522 }
523 523
524 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 524 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
@@ -536,7 +536,7 @@ chip_reset:
536 536
537 if (sched) { 537 if (sched) {
538 /* turn off every interrupt except SWBA */ 538 /* turn off every interrupt except SWBA */
539 ath9k_hw_set_interrupts(ah, (sc->imask & ATH9K_INT_SWBA)); 539 ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA));
540 tasklet_schedule(&sc->intr_tq); 540 tasklet_schedule(&sc->intr_tq);
541 } 541 }
542 542
@@ -887,7 +887,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
887 ath_beacon_config(sc, NULL); /* restart beacons */ 887 ath_beacon_config(sc, NULL); /* restart beacons */
888 888
889 /* Re-Enable interrupts */ 889 /* Re-Enable interrupts */
890 ath9k_hw_set_interrupts(ah, sc->imask); 890 ath9k_hw_set_interrupts(ah, ah->imask);
891 891
892 /* Enable LED */ 892 /* Enable LED */
893 ath9k_hw_cfg_output(ah, ah->led_pin, 893 ath9k_hw_cfg_output(ah, ah->led_pin,
@@ -977,7 +977,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
977 if (sc->sc_flags & SC_OP_BEACONS) 977 if (sc->sc_flags & SC_OP_BEACONS)
978 ath_beacon_config(sc, NULL); /* restart beacons */ 978 ath_beacon_config(sc, NULL); /* restart beacons */
979 979
980 ath9k_hw_set_interrupts(ah, sc->imask); 980 ath9k_hw_set_interrupts(ah, ah->imask);
981 981
982 if (retry_tx) { 982 if (retry_tx) {
983 int i; 983 int i;
@@ -1162,23 +1162,23 @@ static int ath9k_start(struct ieee80211_hw *hw)
1162 } 1162 }
1163 1163
1164 /* Setup our intr mask. */ 1164 /* Setup our intr mask. */
1165 sc->imask = ATH9K_INT_RX | ATH9K_INT_TX 1165 ah->imask = ATH9K_INT_RX | ATH9K_INT_TX
1166 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN 1166 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
1167 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL; 1167 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
1168 1168
1169 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT) 1169 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
1170 sc->imask |= ATH9K_INT_GTT; 1170 ah->imask |= ATH9K_INT_GTT;
1171 1171
1172 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) 1172 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1173 sc->imask |= ATH9K_INT_CST; 1173 ah->imask |= ATH9K_INT_CST;
1174 1174
1175 ath_cache_conf_rate(sc, &hw->conf); 1175 ath_cache_conf_rate(sc, &hw->conf);
1176 1176
1177 sc->sc_flags &= ~SC_OP_INVALID; 1177 sc->sc_flags &= ~SC_OP_INVALID;
1178 1178
1179 /* Disable BMISS interrupt when we're not associated */ 1179 /* Disable BMISS interrupt when we're not associated */
1180 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 1180 ah->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1181 ath9k_hw_set_interrupts(ah, sc->imask); 1181 ath9k_hw_set_interrupts(ah, ah->imask);
1182 1182
1183 ieee80211_wake_queues(hw); 1183 ieee80211_wake_queues(hw);
1184 1184
@@ -1372,14 +1372,15 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1372{ 1372{
1373 struct ath_wiphy *aphy = hw->priv; 1373 struct ath_wiphy *aphy = hw->priv;
1374 struct ath_softc *sc = aphy->sc; 1374 struct ath_softc *sc = aphy->sc;
1375 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1375 struct ath_hw *ah = sc->sc_ah;
1376 struct ath_common *common = ath9k_hw_common(ah);
1376 struct ath_vif *avp = (void *)vif->drv_priv; 1377 struct ath_vif *avp = (void *)vif->drv_priv;
1377 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; 1378 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
1378 int ret = 0; 1379 int ret = 0;
1379 1380
1380 mutex_lock(&sc->mutex); 1381 mutex_lock(&sc->mutex);
1381 1382
1382 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) && 1383 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) &&
1383 sc->nvifs > 0) { 1384 sc->nvifs > 0) {
1384 ret = -ENOBUFS; 1385 ret = -ENOBUFS;
1385 goto out; 1386 goto out;
@@ -1414,19 +1415,19 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1414 1415
1415 sc->nvifs++; 1416 sc->nvifs++;
1416 1417
1417 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 1418 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1418 ath9k_set_bssid_mask(hw); 1419 ath9k_set_bssid_mask(hw);
1419 1420
1420 if (sc->nvifs > 1) 1421 if (sc->nvifs > 1)
1421 goto out; /* skip global settings for secondary vif */ 1422 goto out; /* skip global settings for secondary vif */
1422 1423
1423 if (ic_opmode == NL80211_IFTYPE_AP) { 1424 if (ic_opmode == NL80211_IFTYPE_AP) {
1424 ath9k_hw_set_tsfadjust(sc->sc_ah, 1); 1425 ath9k_hw_set_tsfadjust(ah, 1);
1425 sc->sc_flags |= SC_OP_TSF_RESET; 1426 sc->sc_flags |= SC_OP_TSF_RESET;
1426 } 1427 }
1427 1428
1428 /* Set the device opmode */ 1429 /* Set the device opmode */
1429 sc->sc_ah->opmode = ic_opmode; 1430 ah->opmode = ic_opmode;
1430 1431
1431 /* 1432 /*
1432 * Enable MIB interrupts when there are hardware phy counters. 1433 * Enable MIB interrupts when there are hardware phy counters.
@@ -1435,11 +1436,11 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1435 if ((vif->type == NL80211_IFTYPE_STATION) || 1436 if ((vif->type == NL80211_IFTYPE_STATION) ||
1436 (vif->type == NL80211_IFTYPE_ADHOC) || 1437 (vif->type == NL80211_IFTYPE_ADHOC) ||
1437 (vif->type == NL80211_IFTYPE_MESH_POINT)) { 1438 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
1438 sc->imask |= ATH9K_INT_MIB; 1439 ah->imask |= ATH9K_INT_MIB;
1439 sc->imask |= ATH9K_INT_TSFOOR; 1440 ah->imask |= ATH9K_INT_TSFOOR;
1440 } 1441 }
1441 1442
1442 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 1443 ath9k_hw_set_interrupts(ah, ah->imask);
1443 1444
1444 if (vif->type == NL80211_IFTYPE_AP || 1445 if (vif->type == NL80211_IFTYPE_AP ||
1445 vif->type == NL80211_IFTYPE_ADHOC || 1446 vif->type == NL80211_IFTYPE_ADHOC ||
@@ -1495,15 +1496,16 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1495 1496
1496void ath9k_enable_ps(struct ath_softc *sc) 1497void ath9k_enable_ps(struct ath_softc *sc)
1497{ 1498{
1499 struct ath_hw *ah = sc->sc_ah;
1500
1498 sc->ps_enabled = true; 1501 sc->ps_enabled = true;
1499 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) { 1502 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
1500 if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) { 1503 if ((ah->imask & ATH9K_INT_TIM_TIMER) == 0) {
1501 sc->imask |= ATH9K_INT_TIM_TIMER; 1504 ah->imask |= ATH9K_INT_TIM_TIMER;
1502 ath9k_hw_set_interrupts(sc->sc_ah, 1505 ath9k_hw_set_interrupts(ah, ah->imask);
1503 sc->imask);
1504 } 1506 }
1505 } 1507 }
1506 ath9k_hw_setrxabort(sc->sc_ah, 1); 1508 ath9k_hw_setrxabort(ah, 1);
1507} 1509}
1508 1510
1509static int ath9k_config(struct ieee80211_hw *hw, u32 changed) 1511static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
@@ -1579,10 +1581,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1579 PS_WAIT_FOR_CAB | 1581 PS_WAIT_FOR_CAB |
1580 PS_WAIT_FOR_PSPOLL_DATA | 1582 PS_WAIT_FOR_PSPOLL_DATA |
1581 PS_WAIT_FOR_TX_ACK); 1583 PS_WAIT_FOR_TX_ACK);
1582 if (sc->imask & ATH9K_INT_TIM_TIMER) { 1584 if (ah->imask & ATH9K_INT_TIM_TIMER) {
1583 sc->imask &= ~ATH9K_INT_TIM_TIMER; 1585 ah->imask &= ~ATH9K_INT_TIM_TIMER;
1584 ath9k_hw_set_interrupts(sc->sc_ah, 1586 ath9k_hw_set_interrupts(sc->sc_ah,
1585 sc->imask); 1587 ah->imask);
1586 } 1588 }
1587 } 1589 }
1588 } 1590 }
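All of the main.c churn above is one mechanical move: the interrupt mask migrates from struct ath_softc (sc->imask) to struct ath_hw (ah->imask), so code that holds only an ath_hw pointer — such as the HTC target support this series adds — can manage interrupts as well. The resulting idiom, repeated at every call site:

/* Any layer holding an ath_hw pointer can now adjust the mask. */
ah->imask |= ATH9K_INT_MIB;
ath9k_hw_set_interrupts(ah, ah->imask);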
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 9441c6718a30..1ec836cf1c0d 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -88,6 +88,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
88} 88}
89 89
90static const struct ath_bus_ops ath_pci_bus_ops = { 90static const struct ath_bus_ops ath_pci_bus_ops = {
91 .ath_bus_type = ATH_PCI,
91 .read_cachesize = ath_pci_read_cachesize, 92 .read_cachesize = ath_pci_read_cachesize,
92 .eeprom_read = ath_pci_eeprom_read, 93 .eeprom_read = ath_pci_eeprom_read,
93 .bt_coex_prep = ath_pci_bt_coex_prep, 94 .bt_coex_prep = ath_pci_bt_coex_prep,
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 0999a495fd46..0132e4c9a9f9 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -503,6 +503,8 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
503#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24 503#define AR_PHY_TX_PWRCTRL_ERR_EST_MODE_S 24
504 504
505#define AR_PHY_TX_PWRCTRL7 0xa274 505#define AR_PHY_TX_PWRCTRL7 0xa274
506#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX 0x0007E000
507#define AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX_S 13
506#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000 508#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN 0x01F80000
507#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19 509#define AR_PHY_TX_PWRCTRL_INIT_TX_GAIN_S 19
508 510
@@ -513,8 +515,16 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
513#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31 515#define AR_PHY_TX_PWRCTRL9_RES_DC_REMOVAL_S 31
514 516
515#define AR_PHY_TX_GAIN_TBL1 0xa300 517#define AR_PHY_TX_GAIN_TBL1 0xa300
516#define AR_PHY_TX_GAIN 0x0007F000 518#define AR_PHY_TX_GAIN_CLC 0x0000001E
517#define AR_PHY_TX_GAIN_S 12 519#define AR_PHY_TX_GAIN_CLC_S 1
520#define AR_PHY_TX_GAIN 0x0007F000
521#define AR_PHY_TX_GAIN_S 12
522
523#define AR_PHY_CLC_TBL1 0xa35c
524#define AR_PHY_CLC_I0 0x07ff0000
525#define AR_PHY_CLC_I0_S 16
526#define AR_PHY_CLC_Q0 0x0000ffd0
527#define AR_PHY_CLC_Q0_S 5
518 528
519#define AR_PHY_CH0_TX_PWRCTRL11 0xa398 529#define AR_PHY_CH0_TX_PWRCTRL11 0xa398
520#define AR_PHY_CH1_TX_PWRCTRL11 0xb398 530#define AR_PHY_CH1_TX_PWRCTRL11 0xb398
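The new defines follow the driver's mask/shift convention: each FIELD mask is paired with a FIELD_S shift so the MS()/SM() helpers can extract and insert field values. A hedged read-modify-write example using one of the fields added above:

u32 regval, gain_max;

regval = REG_READ(ah, AR_PHY_TX_PWRCTRL7);
gain_max = MS(regval, AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);   /* extract */

regval &= ~AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX;               /* clear */
regval |= SM(gain_max, AR_PHY_TX_PWRCTRL_TX_GAIN_TAB_MAX);  /* insert */
REG_WRITE(ah, AR_PHY_TX_PWRCTRL7, regval);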
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 244e1c629177..ee81291f2fba 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -1228,8 +1228,12 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1228 long_retry = rate->count - 1; 1228 long_retry = rate->count - 1;
1229 } 1229 }
1230 1230
1231 if (!priv_sta || !ieee80211_is_data(fc) || 1231 if (!priv_sta || !ieee80211_is_data(fc))
1232 !(tx_info->pad[0] & ATH_TX_INFO_UPDATE_RC)) 1232 return;
1233
1234 /* This packet was aggregated but doesn't carry status info */
1235 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
1236 !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
1233 return; 1237 return;
1234 1238
1235 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED) 1239 if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
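The replacement test leans on mac80211's own flags instead of the driver-private ATH_TX_INFO_UPDATE_RC bit: IEEE80211_TX_CTL_AMPDU marks a frame sent as part of an aggregate, while IEEE80211_TX_STAT_AMPDU marks status that actually covers the aggregate. Restated as a standalone predicate:

/* Sketch: rate-control feedback is usable unless the frame was
 * aggregated but reported no per-aggregate status. */
static bool has_usable_rc_status(struct ieee80211_tx_info *tx_info)
{
        if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
            !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
                return false;
        return true;
}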
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 4f6d6fd442f4..3d8d40cdc99e 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -110,8 +110,8 @@ struct ath_rate_table {
110 int rate_cnt; 110 int rate_cnt;
111 int mcs_start; 111 int mcs_start;
112 struct { 112 struct {
113 int valid; 113 u8 valid;
114 int valid_single_stream; 114 u8 valid_single_stream;
115 u8 phy; 115 u8 phy;
116 u32 ratekbps; 116 u32 ratekbps;
117 u32 user_ratekbps; 117 u32 user_ratekbps;
@@ -172,14 +172,13 @@ struct ath_rate_priv {
172 172
173#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0) 173#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0)
174#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1) 174#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1)
175#define ATH_TX_INFO_UPDATE_RC (1 << 2)
176#define ATH_TX_INFO_XRETRY (1 << 3) 175#define ATH_TX_INFO_XRETRY (1 << 3)
177#define ATH_TX_INFO_UNDERRUN (1 << 4) 176#define ATH_TX_INFO_UNDERRUN (1 << 4)
178 177
179enum ath9k_internal_frame_type { 178enum ath9k_internal_frame_type {
180 ATH9K_NOT_INTERNAL, 179 ATH9K_IFT_NOT_INTERNAL,
181 ATH9K_INT_PAUSE, 180 ATH9K_IFT_PAUSE,
182 ATH9K_INT_UNPAUSE 181 ATH9K_IFT_UNPAUSE
183}; 182};
184 183
185int ath_rate_control_register(void); 184int ath_rate_control_register(void);
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 1ca42e5148c8..94560e2fe376 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -477,7 +477,6 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
477 477
478 struct ath_buf *bf; 478 struct ath_buf *bf;
479 struct ath_desc *ds; 479 struct ath_desc *ds;
480 struct ath_rx_status *rx_stats;
481 struct sk_buff *skb = NULL, *requeue_skb; 480 struct sk_buff *skb = NULL, *requeue_skb;
482 struct ieee80211_rx_status *rxs; 481 struct ieee80211_rx_status *rxs;
483 struct ath_hw *ah = sc->sc_ah; 482 struct ath_hw *ah = sc->sc_ah;
@@ -491,6 +490,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
491 struct ieee80211_hdr *hdr; 490 struct ieee80211_hdr *hdr;
492 int retval; 491 int retval;
493 bool decrypt_error = false; 492 bool decrypt_error = false;
493 struct ath_rx_status rs;
494 494
495 spin_lock_bh(&sc->rx.rxbuflock); 495 spin_lock_bh(&sc->rx.rxbuflock);
496 496
@@ -518,14 +518,14 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
518 * on. All this is necessary because of our use of 518 * on. All this is necessary because of our use of
519 * a self-linked list to avoid rx overruns. 519 * a self-linked list to avoid rx overruns.
520 */ 520 */
521 retval = ath9k_hw_rxprocdesc(ah, ds, 521 memset(&rs, 0, sizeof(rs));
522 bf->bf_daddr, 522 retval = ath9k_hw_rxprocdesc(ah, ds, &rs, 0);
523 PA2DESC(sc, ds->ds_link),
524 0);
525 if (retval == -EINPROGRESS) { 523 if (retval == -EINPROGRESS) {
524 struct ath_rx_status trs;
526 struct ath_buf *tbf; 525 struct ath_buf *tbf;
527 struct ath_desc *tds; 526 struct ath_desc *tds;
528 527
528 memset(&trs, 0, sizeof(trs));
529 if (list_is_last(&bf->list, &sc->rx.rxbuf)) { 529 if (list_is_last(&bf->list, &sc->rx.rxbuf)) {
530 sc->rx.rxlink = NULL; 530 sc->rx.rxlink = NULL;
531 break; 531 break;
@@ -545,8 +545,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
545 */ 545 */
546 546
547 tds = tbf->bf_desc; 547 tds = tbf->bf_desc;
548 retval = ath9k_hw_rxprocdesc(ah, tds, tbf->bf_daddr, 548 retval = ath9k_hw_rxprocdesc(ah, tds, &trs, 0);
549 PA2DESC(sc, tds->ds_link), 0);
550 if (retval == -EINPROGRESS) { 549 if (retval == -EINPROGRESS) {
551 break; 550 break;
552 } 551 }
@@ -569,9 +568,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
569 rxs = IEEE80211_SKB_RXCB(skb); 568 rxs = IEEE80211_SKB_RXCB(skb);
570 569
571 hw = ath_get_virt_hw(sc, hdr); 570 hw = ath_get_virt_hw(sc, hdr);
572 rx_stats = &ds->ds_rxstat;
573 571
574 ath_debug_stat_rx(sc, bf); 572 ath_debug_stat_rx(sc, &rs);
575 573
576 /* 574 /*
577 * If we're asked to flush receive queue, directly 575 * If we're asked to flush receive queue, directly
@@ -580,7 +578,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
580 if (flush) 578 if (flush)
581 goto requeue; 579 goto requeue;
582 580
583 retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, rx_stats, 581 retval = ath9k_cmn_rx_skb_preprocess(common, hw, skb, &rs,
584 rxs, &decrypt_error); 582 rxs, &decrypt_error);
585 if (retval) 583 if (retval)
586 goto requeue; 584 goto requeue;
@@ -601,9 +599,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
601 common->rx_bufsize, 599 common->rx_bufsize,
602 DMA_FROM_DEVICE); 600 DMA_FROM_DEVICE);
603 601
604 skb_put(skb, rx_stats->rs_datalen); 602 skb_put(skb, rs.rs_datalen);
605 603
606 ath9k_cmn_rx_skb_postprocess(common, skb, rx_stats, 604 ath9k_cmn_rx_skb_postprocess(common, skb, &rs,
607 rxs, decrypt_error); 605 rxs, decrypt_error);
608 606
609 /* We will now give hardware our shiny new allocated skb */ 607 /* We will now give hardware our shiny new allocated skb */
@@ -626,9 +624,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
626 * change the default rx antenna if rx diversity chooses the 624 * change the default rx antenna if rx diversity chooses the
627 * other antenna 3 times in a row. 625 * other antenna 3 times in a row.
628 */ 626 */
629 if (sc->rx.defant != ds->ds_rxstat.rs_antenna) { 627 if (sc->rx.defant != rs.rs_antenna) {
630 if (++sc->rx.rxotherant >= 3) 628 if (++sc->rx.rxotherant >= 3)
631 ath_setdefantenna(sc, rx_stats->rs_antenna); 629 ath_setdefantenna(sc, rs.rs_antenna);
632 } else { 630 } else {
633 sc->rx.rxotherant = 0; 631 sc->rx.rxotherant = 0;
634 } 632 }
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 72cfa8ebd9ae..7e36ad7421b7 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -679,7 +679,7 @@
679 679
680#define AR_WA 0x4004 680#define AR_WA 0x4004
681#define AR_WA_D3_L1_DISABLE (1 << 14) 681#define AR_WA_D3_L1_DISABLE (1 << 14)
682#define AR9285_WA_DEFAULT 0x004a05cb 682#define AR9285_WA_DEFAULT 0x004a050b
683#define AR9280_WA_DEFAULT 0x0040073b 683#define AR9280_WA_DEFAULT 0x0040073b
684#define AR_WA_DEFAULT 0x0000073f 684#define AR_WA_DEFAULT 0x0000073f
685 685
@@ -845,6 +845,10 @@
845 (AR_SREV_9271(_ah) && \ 845 (AR_SREV_9271(_ah) && \
846 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11)) 846 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9271_11))
847 847
848#define AR_SREV_9285E_20(_ah) \
849 (AR_SREV_9285_12_OR_LATER(_ah) && \
850 ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
851
848#define AR_RADIO_SREV_MAJOR 0xf0 852#define AR_RADIO_SREV_MAJOR 0xf0
849#define AR_RAD5133_SREV_MAJOR 0xc0 853#define AR_RAD5133_SREV_MAJOR 0xc0
850#define AR_RAD2133_SREV_MAJOR 0xd0 854#define AR_RAD2133_SREV_MAJOR 0xd0
@@ -940,6 +944,7 @@ enum {
940#define AR928X_NUM_GPIO 10 944#define AR928X_NUM_GPIO 10
941#define AR9285_NUM_GPIO 12 945#define AR9285_NUM_GPIO 12
942#define AR9287_NUM_GPIO 11 946#define AR9287_NUM_GPIO 11
947#define AR9271_NUM_GPIO 16
943 948
944#define AR_GPIO_IN_OUT 0x4048 949#define AR_GPIO_IN_OUT 0x4048
945#define AR_GPIO_IN_VAL 0x0FFFC000 950#define AR_GPIO_IN_VAL 0x0FFFC000
@@ -950,6 +955,8 @@ enum {
950#define AR9285_GPIO_IN_VAL_S 12 955#define AR9285_GPIO_IN_VAL_S 12
951#define AR9287_GPIO_IN_VAL 0x003FF800 956#define AR9287_GPIO_IN_VAL 0x003FF800
952#define AR9287_GPIO_IN_VAL_S 11 957#define AR9287_GPIO_IN_VAL_S 11
958#define AR9271_GPIO_IN_VAL 0xFFFF0000
959#define AR9271_GPIO_IN_VAL_S 16
953 960
954#define AR_GPIO_OE_OUT 0x404c 961#define AR_GPIO_OE_OUT 0x404c
955#define AR_GPIO_OE_OUT_DRV 0x3 962#define AR_GPIO_OE_OUT_DRV 0x3
@@ -1178,6 +1185,13 @@ enum {
1178#define AR9285_AN_RF2G4_DB2_4 0x00003800 1185#define AR9285_AN_RF2G4_DB2_4 0x00003800
1179#define AR9285_AN_RF2G4_DB2_4_S 11 1186#define AR9285_AN_RF2G4_DB2_4_S 11
1180 1187
1188#define AR9285_RF2G5 0x7830
1189#define AR9285_RF2G5_IC50TX 0xfffff8ff
1190#define AR9285_RF2G5_IC50TX_SET 0x00000400
1191#define AR9285_RF2G5_IC50TX_XE_SET 0x00000500
1192#define AR9285_RF2G5_IC50TX_CLEAR 0x00000700
1193#define AR9285_RF2G5_IC50TX_CLEAR_S 8
1194
1181/* AR9271 : 0x7828, 0x782c different setting from AR9285 */ 1195/* AR9271 : 0x7828, 0x782c different setting from AR9285 */
1182#define AR9271_AN_RF2G3_OB_cck 0x001C0000 1196#define AR9271_AN_RF2G3_OB_cck 0x001C0000
1183#define AR9271_AN_RF2G3_OB_cck_S 18 1197#define AR9271_AN_RF2G3_OB_cck_S 18
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 00c0e21a4af7..105ad40968f6 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -220,7 +220,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
220 220
221 memset(&txctl, 0, sizeof(struct ath_tx_control)); 221 memset(&txctl, 0, sizeof(struct ath_tx_control));
222 txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]]; 222 txctl.txq = &sc->tx.txq[sc->tx.hwq_map[ATH9K_WME_AC_VO]];
223 txctl.frame_type = ps ? ATH9K_INT_PAUSE : ATH9K_INT_UNPAUSE; 223 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
224 224
225 if (ath_tx_start(aphy->hw, skb, &txctl) != 0) 225 if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
226 goto exit; 226 goto exit;
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
new file mode 100644
index 000000000000..f2ff18cf3e60
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -0,0 +1,319 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "htc.h"
18
19static const char *wmi_cmd_to_name(enum wmi_cmd_id wmi_cmd)
20{
21 switch (wmi_cmd) {
22 case WMI_ECHO_CMDID:
23 return "WMI_ECHO_CMDID";
24 case WMI_ACCESS_MEMORY_CMDID:
25 return "WMI_ACCESS_MEMORY_CMDID";
26 case WMI_DISABLE_INTR_CMDID:
27 return "WMI_DISABLE_INTR_CMDID";
28 case WMI_ENABLE_INTR_CMDID:
29 return "WMI_ENABLE_INTR_CMDID";
30 case WMI_RX_LINK_CMDID:
31 return "WMI_RX_LINK_CMDID";
32 case WMI_ATH_INIT_CMDID:
33 return "WMI_ATH_INIT_CMDID";
34 case WMI_ABORT_TXQ_CMDID:
35 return "WMI_ABORT_TXQ_CMDID";
36 case WMI_STOP_TX_DMA_CMDID:
37 return "WMI_STOP_TX_DMA_CMDID";
38 case WMI_STOP_DMA_RECV_CMDID:
39 return "WMI_STOP_DMA_RECV_CMDID";
40 case WMI_ABORT_TX_DMA_CMDID:
41 return "WMI_ABORT_TX_DMA_CMDID";
42 case WMI_DRAIN_TXQ_CMDID:
43 return "WMI_DRAIN_TXQ_CMDID";
44 case WMI_DRAIN_TXQ_ALL_CMDID:
45 return "WMI_DRAIN_TXQ_ALL_CMDID";
46 case WMI_START_RECV_CMDID:
47 return "WMI_START_RECV_CMDID";
48 case WMI_STOP_RECV_CMDID:
49 return "WMI_STOP_RECV_CMDID";
50 case WMI_FLUSH_RECV_CMDID:
51 return "WMI_FLUSH_RECV_CMDID";
52 case WMI_SET_MODE_CMDID:
53 return "WMI_SET_MODE_CMDID";
54 case WMI_RESET_CMDID:
55 return "WMI_RESET_CMDID";
56 case WMI_NODE_CREATE_CMDID:
57 return "WMI_NODE_CREATE_CMDID";
58 case WMI_NODE_REMOVE_CMDID:
59 return "WMI_NODE_REMOVE_CMDID";
60 case WMI_VAP_REMOVE_CMDID:
61 return "WMI_VAP_REMOVE_CMDID";
62 case WMI_VAP_CREATE_CMDID:
63 return "WMI_VAP_CREATE_CMDID";
64 case WMI_BEACON_UPDATE_CMDID:
65 return "WMI_BEACON_UPDATE_CMDID";
66 case WMI_REG_READ_CMDID:
67 return "WMI_REG_READ_CMDID";
68 case WMI_REG_WRITE_CMDID:
69 return "WMI_REG_WRITE_CMDID";
70 case WMI_RC_STATE_CHANGE_CMDID:
71 return "WMI_RC_STATE_CHANGE_CMDID";
72 case WMI_RC_RATE_UPDATE_CMDID:
73 return "WMI_RC_RATE_UPDATE_CMDID";
74 case WMI_DEBUG_INFO_CMDID:
75 return "WMI_DEBUG_INFO_CMDID";
76 case WMI_HOST_ATTACH:
77 return "WMI_HOST_ATTACH";
78 case WMI_TARGET_IC_UPDATE_CMDID:
79 return "WMI_TARGET_IC_UPDATE_CMDID";
80 case WMI_TGT_STATS_CMDID:
81 return "WMI_TGT_STATS_CMDID";
82 case WMI_TX_AGGR_ENABLE_CMDID:
83 return "WMI_TX_AGGR_ENABLE_CMDID";
84 case WMI_TGT_DETACH_CMDID:
85 return "WMI_TGT_DETACH_CMDID";
86 case WMI_TGT_TXQ_ENABLE_CMDID:
87 return "WMI_TGT_TXQ_ENABLE_CMDID";
88 }
89
90 return "Bogus";
91}
92
93struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv)
94{
95 struct wmi *wmi;
96
97 wmi = kzalloc(sizeof(struct wmi), GFP_KERNEL);
98 if (!wmi)
99 return NULL;
100
101 wmi->drv_priv = priv;
102 wmi->stopped = false;
103 mutex_init(&wmi->op_mutex);
104 init_completion(&wmi->cmd_wait);
105
106 return wmi;
107}
108
109void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
110{
111 struct wmi *wmi = priv->wmi;
112
113 mutex_lock(&wmi->op_mutex);
114 wmi->stopped = true;
115 mutex_unlock(&wmi->op_mutex);
116
117 kfree(priv->wmi);
118}
119
120void ath9k_wmi_tasklet(unsigned long data)
121{
122 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
123 struct ath_common *common = ath9k_hw_common(priv->ah);
124 struct wmi_cmd_hdr *hdr;
125 struct wmi_swba *swba_hdr;
126 enum wmi_event_id event;
127 struct sk_buff *skb;
128 void *wmi_event;
129 unsigned long flags;
130#ifdef CONFIG_ATH9K_HTC_DEBUGFS
131 u32 txrate;
132#endif
133
134 spin_lock_irqsave(&priv->wmi->wmi_lock, flags);
135 skb = priv->wmi->wmi_skb;
136 spin_unlock_irqrestore(&priv->wmi->wmi_lock, flags);
137
138 hdr = (struct wmi_cmd_hdr *) skb->data;
139 event = be16_to_cpu(hdr->command_id);
140 wmi_event = skb_pull(skb, sizeof(struct wmi_cmd_hdr));
141
142 ath_print(common, ATH_DBG_WMI,
143 "WMI Event: 0x%x\n", event);
144
145 switch (event) {
146 case WMI_TGT_RDY_EVENTID:
147 break;
148 case WMI_SWBA_EVENTID:
149 swba_hdr = (struct wmi_swba *) wmi_event;
150 ath9k_htc_swba(priv, swba_hdr->beacon_pending);
151 break;
152 case WMI_FATAL_EVENTID:
153 break;
154 case WMI_TXTO_EVENTID:
155 break;
156 case WMI_BMISS_EVENTID:
157 break;
158 case WMI_WLAN_TXCOMP_EVENTID:
159 break;
160 case WMI_DELBA_EVENTID:
161 break;
162 case WMI_TXRATE_EVENTID:
163#ifdef CONFIG_ATH9K_HTC_DEBUGFS
164 txrate = ((struct wmi_event_txrate *)wmi_event)->txrate;
165 priv->debug.txrate = be32_to_cpu(txrate);
166#endif
167 break;
168 default:
169 break;
170 }
171
172 kfree_skb(skb);
173}
174
175static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
176{
177 skb_pull(skb, sizeof(struct wmi_cmd_hdr));
178
179 if (wmi->cmd_rsp_buf != NULL && wmi->cmd_rsp_len != 0)
180 memcpy(wmi->cmd_rsp_buf, skb->data, wmi->cmd_rsp_len);
181
182 complete(&wmi->cmd_wait);
183}
184
185static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
186 enum htc_endpoint_id epid)
187{
188 struct wmi *wmi = (struct wmi *) priv;
189 struct wmi_cmd_hdr *hdr;
190 u16 cmd_id;
191
192 if (unlikely(wmi->stopped))
193 goto free_skb;
194
195 hdr = (struct wmi_cmd_hdr *) skb->data;
196 cmd_id = be16_to_cpu(hdr->command_id);
197
198 if (cmd_id & 0x1000) {
199 spin_lock(&wmi->wmi_lock);
200 wmi->wmi_skb = skb;
201 spin_unlock(&wmi->wmi_lock);
202 tasklet_schedule(&wmi->drv_priv->wmi_tasklet);
203 return;
204 }
205
206 /* WMI command response */
207 ath9k_wmi_rsp_callback(wmi, skb);
208
209free_skb:
210 kfree_skb(skb);
211}
212
213static void ath9k_wmi_ctrl_tx(void *priv, struct sk_buff *skb,
214 enum htc_endpoint_id epid, bool txok)
215{
216 kfree_skb(skb);
217}
218
219int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
220 enum htc_endpoint_id *wmi_ctrl_epid)
221{
222 struct htc_service_connreq connect;
223 int ret;
224
225 wmi->htc = htc;
226
227 memset(&connect, 0, sizeof(connect));
228
229 connect.ep_callbacks.priv = wmi;
230 connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx;
231 connect.ep_callbacks.rx = ath9k_wmi_ctrl_rx;
232 connect.service_id = WMI_CONTROL_SVC;
233
234 ret = htc_connect_service(htc, &connect, &wmi->ctrl_epid);
235 if (ret)
236 return ret;
237
238 *wmi_ctrl_epid = wmi->ctrl_epid;
239
240 return 0;
241}
242
243static int ath9k_wmi_cmd_issue(struct wmi *wmi,
244 struct sk_buff *skb,
245 enum wmi_cmd_id cmd, u16 len)
246{
247 struct wmi_cmd_hdr *hdr;
248
249 hdr = (struct wmi_cmd_hdr *) skb_push(skb, sizeof(struct wmi_cmd_hdr));
250 hdr->command_id = cpu_to_be16(cmd);
251 hdr->seq_no = cpu_to_be16(++wmi->tx_seq_id);
252
253 return htc_send(wmi->htc, skb, wmi->ctrl_epid, NULL);
254}
255
256int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
257 u8 *cmd_buf, u32 cmd_len,
258 u8 *rsp_buf, u32 rsp_len,
259 u32 timeout)
260{
261 struct ath_hw *ah = wmi->drv_priv->ah;
262 struct ath_common *common = ath9k_hw_common(ah);
263 u16 headroom = sizeof(struct htc_frame_hdr) +
264 sizeof(struct wmi_cmd_hdr);
265 struct sk_buff *skb;
266 u8 *data;
267 int time_left, ret = 0;
268
269 if (!wmi)
270 return -EINVAL;
271
272 skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
273 if (!skb)
274 return -ENOMEM;
275
276 skb_reserve(skb, headroom);
277
278 if (cmd_len != 0 && cmd_buf != NULL) {
279 data = (u8 *) skb_put(skb, cmd_len);
280 memcpy(data, cmd_buf, cmd_len);
281 }
282
283 mutex_lock(&wmi->op_mutex);
284
285 /* check if wmi stopped flag is set */
286 if (unlikely(wmi->stopped)) {
287 ret = -EPROTO;
288 goto out;
289 }
290
291 /* record the rsp buffer and length */
292 wmi->cmd_rsp_buf = rsp_buf;
293 wmi->cmd_rsp_len = rsp_len;
294
295 ret = ath9k_wmi_cmd_issue(wmi, skb, cmd_id, cmd_len);
296 if (ret)
297 goto out;
298
299 time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
300 if (!time_left) {
301 ath_print(common, ATH_DBG_WMI,
302 "Timeout waiting for WMI command: %s\n",
303 wmi_cmd_to_name(cmd_id));
304 mutex_unlock(&wmi->op_mutex);
305 return -ETIMEDOUT;
306 }
307
308 mutex_unlock(&wmi->op_mutex);
309
310 return 0;
311
312out:
313 ath_print(common, ATH_DBG_WMI,
314 "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
315 mutex_unlock(&wmi->op_mutex);
316 kfree_skb(skb);
317
318 return ret;
319}
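ath9k_wmi_cmd() above is the synchronous command path: it pushes the command onto the control endpoint and sleeps on cmd_wait until ath9k_wmi_rsp_callback() completes it or the timeout fires. A hedged caller sketch (example_start_recv() is hypothetical; the WMI_CMD() wrappers in wmi.h below cut this boilerplate down further):

static int example_start_recv(struct ath9k_htc_priv *priv)
{
        u32 cmd_rsp;

        /* No payload; block up to one second for the target's reply. */
        return ath9k_wmi_cmd(priv->wmi, WMI_START_RECV_CMDID,
                             NULL, 0,
                             (u8 *)&cmd_rsp, sizeof(cmd_rsp), HZ);
}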
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
new file mode 100644
index 000000000000..39ef926f27c2
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -0,0 +1,126 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef WMI_H
18#define WMI_H
19
20
21struct wmi_event_txrate {
22 u32 txrate;
23 struct {
24 u8 rssi_thresh;
25 u8 per;
26 } rc_stats;
27} __packed;
28
29struct wmi_cmd_hdr {
30 u16 command_id;
31 u16 seq_no;
32} __packed;
33
34struct wmi_swba {
35 u8 beacon_pending;
36} __packed;
37
38enum wmi_cmd_id {
39 WMI_ECHO_CMDID = 0x0001,
40 WMI_ACCESS_MEMORY_CMDID,
41
42 /* Commands to Target */
43 WMI_DISABLE_INTR_CMDID,
44 WMI_ENABLE_INTR_CMDID,
45 WMI_RX_LINK_CMDID,
46 WMI_ATH_INIT_CMDID,
47 WMI_ABORT_TXQ_CMDID,
48 WMI_STOP_TX_DMA_CMDID,
49 WMI_STOP_DMA_RECV_CMDID,
50 WMI_ABORT_TX_DMA_CMDID,
51 WMI_DRAIN_TXQ_CMDID,
52 WMI_DRAIN_TXQ_ALL_CMDID,
53 WMI_START_RECV_CMDID,
54 WMI_STOP_RECV_CMDID,
55 WMI_FLUSH_RECV_CMDID,
56 WMI_SET_MODE_CMDID,
57 WMI_RESET_CMDID,
58 WMI_NODE_CREATE_CMDID,
59 WMI_NODE_REMOVE_CMDID,
60 WMI_VAP_REMOVE_CMDID,
61 WMI_VAP_CREATE_CMDID,
62 WMI_BEACON_UPDATE_CMDID,
63 WMI_REG_READ_CMDID,
64 WMI_REG_WRITE_CMDID,
65 WMI_RC_STATE_CHANGE_CMDID,
66 WMI_RC_RATE_UPDATE_CMDID,
67 WMI_DEBUG_INFO_CMDID,
68 WMI_HOST_ATTACH,
69 WMI_TARGET_IC_UPDATE_CMDID,
70 WMI_TGT_STATS_CMDID,
71 WMI_TX_AGGR_ENABLE_CMDID,
72 WMI_TGT_DETACH_CMDID,
73 WMI_TGT_TXQ_ENABLE_CMDID,
74};
75
76enum wmi_event_id {
77 WMI_TGT_RDY_EVENTID = 0x1001,
78 WMI_SWBA_EVENTID,
79 WMI_FATAL_EVENTID,
80 WMI_TXTO_EVENTID,
81 WMI_BMISS_EVENTID,
82 WMI_WLAN_TXCOMP_EVENTID,
83 WMI_DELBA_EVENTID,
84 WMI_TXRATE_EVENTID,
85};
86
87struct wmi {
88 struct ath9k_htc_priv *drv_priv;
89 struct htc_target *htc;
90 enum htc_endpoint_id ctrl_epid;
91 struct mutex op_mutex;
92 struct completion cmd_wait;
93 u16 tx_seq_id;
94 u8 *cmd_rsp_buf;
95 u32 cmd_rsp_len;
96 bool stopped;
97
98 struct sk_buff *wmi_skb;
99 spinlock_t wmi_lock;
100};
101
102struct wmi *ath9k_init_wmi(struct ath9k_htc_priv *priv);
103void ath9k_deinit_wmi(struct ath9k_htc_priv *priv);
104int ath9k_wmi_connect(struct htc_target *htc, struct wmi *wmi,
105 enum htc_endpoint_id *wmi_ctrl_epid);
106int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
107 u8 *cmd_buf, u32 cmd_len,
108 u8 *rsp_buf, u32 rsp_len,
109 u32 timeout);
110void ath9k_wmi_tasklet(unsigned long data);
111
112#define WMI_CMD(_wmi_cmd) \
113 do { \
114 ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, NULL, 0, \
115 (u8 *) &cmd_rsp, \
116 sizeof(cmd_rsp), HZ); \
117 } while (0)
118
119#define WMI_CMD_BUF(_wmi_cmd, _buf) \
120 do { \
121 ret = ath9k_wmi_cmd(priv->wmi, _wmi_cmd, \
122 (u8 *) _buf, sizeof(*_buf), \
123 &cmd_rsp, sizeof(cmd_rsp), HZ); \
124 } while (0)
125
126#endif /* WMI_H */
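Note that WMI_CMD() and WMI_CMD_BUF() deliberately capture priv, ret and cmd_rsp from the caller's scope, so every call site must declare all three. An illustrative call site (not from this patch; common is assumed to be in scope):

{
        int ret;
        u32 cmd_rsp;

        WMI_CMD(WMI_STOP_RECV_CMDID);
        if (ret)
                ath_print(common, ATH_DBG_FATAL,
                          "Unable to stop recv: %d\n", ret);
}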
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 294b486bc3ed..02df4cbf179f 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -59,15 +59,14 @@ static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq,
59 struct ath_atx_tid *tid, 59 struct ath_atx_tid *tid,
60 struct list_head *bf_head); 60 struct list_head *bf_head);
61static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 61static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
62 struct ath_txq *txq, 62 struct ath_txq *txq, struct list_head *bf_q,
63 struct list_head *bf_q, 63 struct ath_tx_status *ts, int txok, int sendbar);
64 int txok, int sendbar);
65static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 64static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
66 struct list_head *head); 65 struct list_head *head);
67static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf); 66static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf);
68static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, 67static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
69 int txok); 68 struct ath_tx_status *ts, int txok);
70static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, 69static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
71 int nbad, int txok, bool update_rc); 70 int nbad, int txok, bool update_rc);
72 71
73enum { 72enum {
@@ -223,6 +222,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
223{ 222{
224 struct ath_buf *bf; 223 struct ath_buf *bf;
225 struct list_head bf_head; 224 struct list_head bf_head;
225 struct ath_tx_status ts;
226
227 memset(&ts, 0, sizeof(ts));
226 INIT_LIST_HEAD(&bf_head); 228 INIT_LIST_HEAD(&bf_head);
227 229
228 for (;;) { 230 for (;;) {
@@ -236,7 +238,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
236 ath_tx_update_baw(sc, tid, bf->bf_seqno); 238 ath_tx_update_baw(sc, tid, bf->bf_seqno);
237 239
238 spin_unlock(&txq->axq_lock); 240 spin_unlock(&txq->axq_lock);
239 ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); 241 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
240 spin_lock(&txq->axq_lock); 242 spin_lock(&txq->axq_lock);
241 } 243 }
242 244
@@ -286,7 +288,7 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
286 288
287static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, 289static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
288 struct ath_buf *bf, struct list_head *bf_q, 290 struct ath_buf *bf, struct list_head *bf_q,
289 int txok) 291 struct ath_tx_status *ts, int txok)
290{ 292{
291 struct ath_node *an = NULL; 293 struct ath_node *an = NULL;
292 struct sk_buff *skb; 294 struct sk_buff *skb;
@@ -296,7 +298,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
296 struct ieee80211_tx_info *tx_info; 298 struct ieee80211_tx_info *tx_info;
297 struct ath_atx_tid *tid = NULL; 299 struct ath_atx_tid *tid = NULL;
298 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf; 300 struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
299 struct ath_desc *ds = bf_last->bf_desc;
300 struct list_head bf_head, bf_pending; 301 struct list_head bf_head, bf_pending;
301 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0; 302 u16 seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
302 u32 ba[WME_BA_BMP_SIZE >> 5]; 303 u32 ba[WME_BA_BMP_SIZE >> 5];
@@ -325,10 +326,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
325 memset(ba, 0, WME_BA_BMP_SIZE >> 3); 326 memset(ba, 0, WME_BA_BMP_SIZE >> 3);
326 327
327 if (isaggr && txok) { 328 if (isaggr && txok) {
328 if (ATH_DS_TX_BA(ds)) { 329 if (ts->ts_flags & ATH9K_TX_BA) {
329 seq_st = ATH_DS_BA_SEQ(ds); 330 seq_st = ts->ts_seqnum;
330 memcpy(ba, ATH_DS_BA_BITMAP(ds), 331 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
331 WME_BA_BMP_SIZE >> 3);
332 } else { 332 } else {
333 /* 333 /*
334 * AR5416 can become deaf/mute when BA 334 * AR5416 can become deaf/mute when BA
@@ -345,7 +345,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
345 INIT_LIST_HEAD(&bf_pending); 345 INIT_LIST_HEAD(&bf_pending);
346 INIT_LIST_HEAD(&bf_head); 346 INIT_LIST_HEAD(&bf_head);
347 347
348 nbad = ath_tx_num_badfrms(sc, bf, txok); 348 nbad = ath_tx_num_badfrms(sc, bf, ts, txok);
349 while (bf) { 349 while (bf) {
350 txfail = txpending = 0; 350 txfail = txpending = 0;
351 bf_next = bf->bf_next; 351 bf_next = bf->bf_next;
@@ -359,7 +359,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
359 acked_cnt++; 359 acked_cnt++;
360 } else { 360 } else {
361 if (!(tid->state & AGGR_CLEANUP) && 361 if (!(tid->state & AGGR_CLEANUP) &&
362 ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) { 362 ts->ts_flags != ATH9K_TX_SW_ABORTED) {
363 if (bf->bf_retries < ATH_MAX_SW_RETRIES) { 363 if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
364 ath_tx_set_retry(sc, txq, bf); 364 ath_tx_set_retry(sc, txq, bf);
365 txpending = 1; 365 txpending = 1;
@@ -402,13 +402,14 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
402 spin_unlock_bh(&txq->axq_lock); 402 spin_unlock_bh(&txq->axq_lock);
403 403
404 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 404 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
405 ath_tx_rc_status(bf, ds, nbad, txok, true); 405 ath_tx_rc_status(bf, ts, nbad, txok, true);
406 rc_update = false; 406 rc_update = false;
407 } else { 407 } else {
408 ath_tx_rc_status(bf, ds, nbad, txok, false); 408 ath_tx_rc_status(bf, ts, nbad, txok, false);
409 } 409 }
410 410
411 ath_tx_complete_buf(sc, bf, txq, &bf_head, !txfail, sendbar); 411 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
412 !txfail, sendbar);
412 } else { 413 } else {
413 /* retry the un-acked ones */ 414 /* retry the un-acked ones */
414 if (bf->bf_next == NULL && bf_last->bf_stale) { 415 if (bf->bf_next == NULL && bf_last->bf_stale) {
@@ -426,10 +427,10 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
426 spin_unlock_bh(&txq->axq_lock); 427 spin_unlock_bh(&txq->axq_lock);
427 428
428 bf->bf_state.bf_type |= BUF_XRETRY; 429 bf->bf_state.bf_type |= BUF_XRETRY;
429 ath_tx_rc_status(bf, ds, nbad, 430 ath_tx_rc_status(bf, ts, nbad,
430 0, false); 431 0, false);
431 ath_tx_complete_buf(sc, bf, txq, 432 ath_tx_complete_buf(sc, bf, txq,
432 &bf_head, 0, 0); 433 &bf_head, ts, 0, 0);
433 break; 434 break;
434 } 435 }
435 436
@@ -752,8 +753,11 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
752 struct ath_node *an = (struct ath_node *)sta->drv_priv; 753 struct ath_node *an = (struct ath_node *)sta->drv_priv;
753 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 754 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
754 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; 755 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum];
756 struct ath_tx_status ts;
755 struct ath_buf *bf; 757 struct ath_buf *bf;
756 struct list_head bf_head; 758 struct list_head bf_head;
759
760 memset(&ts, 0, sizeof(ts));
757 INIT_LIST_HEAD(&bf_head); 761 INIT_LIST_HEAD(&bf_head);
758 762
759 if (txtid->state & AGGR_CLEANUP) 763 if (txtid->state & AGGR_CLEANUP)
@@ -780,7 +784,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
780 } 784 }
781 list_move_tail(&bf->list, &bf_head); 785 list_move_tail(&bf->list, &bf_head);
782 ath_tx_update_baw(sc, txtid, bf->bf_seqno); 786 ath_tx_update_baw(sc, txtid, bf->bf_seqno);
783 ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); 787 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
784 } 788 }
785 spin_unlock_bh(&txq->axq_lock); 789 spin_unlock_bh(&txq->axq_lock);
786 790
@@ -1028,6 +1032,11 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1028{ 1032{
1029 struct ath_buf *bf, *lastbf; 1033 struct ath_buf *bf, *lastbf;
1030 struct list_head bf_head; 1034 struct list_head bf_head;
1035 struct ath_tx_status ts;
1036
1037 memset(&ts, 0, sizeof(ts));
1038 if (!retry_tx)
1039 ts.ts_flags = ATH9K_TX_SW_ABORTED;
1031 1040
1032 INIT_LIST_HEAD(&bf_head); 1041 INIT_LIST_HEAD(&bf_head);
1033 1042
@@ -1053,9 +1062,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1053 } 1062 }
1054 1063
1055 lastbf = bf->bf_lastbf; 1064 lastbf = bf->bf_lastbf;
1056 if (!retry_tx)
1057 lastbf->bf_desc->ds_txstat.ts_flags =
1058 ATH9K_TX_SW_ABORTED;
1059 1065
1060 /* remove ath_buf's of the same mpdu from txq */ 1066 /* remove ath_buf's of the same mpdu from txq */
1061 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list); 1067 list_cut_position(&bf_head, &txq->axq_q, &lastbf->list);
@@ -1064,9 +1070,9 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1064 spin_unlock_bh(&txq->axq_lock); 1070 spin_unlock_bh(&txq->axq_lock);
1065 1071
1066 if (bf_isampdu(bf)) 1072 if (bf_isampdu(bf))
1067 ath_tx_complete_aggr(sc, txq, bf, &bf_head, 0); 1073 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0);
1068 else 1074 else
1069 ath_tx_complete_buf(sc, bf, txq, &bf_head, 0, 0); 1075 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
1070 } 1076 }
1071 1077
1072 spin_lock_bh(&txq->axq_lock); 1078 spin_lock_bh(&txq->axq_lock);
@@ -1568,12 +1574,12 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1568 1574
1569 tx_info->pad[0] = 0; 1575 tx_info->pad[0] = 0;
1570 switch (txctl->frame_type) { 1576 switch (txctl->frame_type) {
1571 case ATH9K_NOT_INTERNAL: 1577 case ATH9K_IFT_NOT_INTERNAL:
1572 break; 1578 break;
1573 case ATH9K_INT_PAUSE: 1579 case ATH9K_IFT_PAUSE:
1574 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE; 1580 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1575 /* fall through */ 1581 /* fall through */
1576 case ATH9K_INT_UNPAUSE: 1582 case ATH9K_IFT_UNPAUSE:
1577 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL; 1583 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1578 break; 1584 break;
1579 } 1585 }
@@ -1852,9 +1858,8 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1852} 1858}
1853 1859
1854static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 1860static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1855 struct ath_txq *txq, 1861 struct ath_txq *txq, struct list_head *bf_q,
1856 struct list_head *bf_q, 1862 struct ath_tx_status *ts, int txok, int sendbar)
1857 int txok, int sendbar)
1858{ 1863{
1859 struct sk_buff *skb = bf->bf_mpdu; 1864 struct sk_buff *skb = bf->bf_mpdu;
1860 unsigned long flags; 1865 unsigned long flags;
@@ -1872,7 +1877,7 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1872 1877
1873 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE); 1878 dma_unmap_single(sc->dev, bf->bf_dmacontext, skb->len, DMA_TO_DEVICE);
1874 ath_tx_complete(sc, skb, bf->aphy, tx_flags); 1879 ath_tx_complete(sc, skb, bf->aphy, tx_flags);
1875 ath_debug_stat_tx(sc, txq, bf); 1880 ath_debug_stat_tx(sc, txq, bf, ts);
1876 1881
1877 /* 1882 /*
1878 * Return the list of ath_buf of this mpdu to free queue 1883 * Return the list of ath_buf of this mpdu to free queue
@@ -1883,23 +1888,21 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1883} 1888}
1884 1889
1885static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf, 1890static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1886 int txok) 1891 struct ath_tx_status *ts, int txok)
1887{ 1892{
1888 struct ath_buf *bf_last = bf->bf_lastbf;
1889 struct ath_desc *ds = bf_last->bf_desc;
1890 u16 seq_st = 0; 1893 u16 seq_st = 0;
1891 u32 ba[WME_BA_BMP_SIZE >> 5]; 1894 u32 ba[WME_BA_BMP_SIZE >> 5];
1892 int ba_index; 1895 int ba_index;
1893 int nbad = 0; 1896 int nbad = 0;
1894 int isaggr = 0; 1897 int isaggr = 0;
1895 1898
1896 if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED) 1899 if (ts->ts_flags == ATH9K_TX_SW_ABORTED)
1897 return 0; 1900 return 0;
1898 1901
1899 isaggr = bf_isaggr(bf); 1902 isaggr = bf_isaggr(bf);
1900 if (isaggr) { 1903 if (isaggr) {
1901 seq_st = ATH_DS_BA_SEQ(ds); 1904 seq_st = ts->ts_seqnum;
1902 memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3); 1905 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
1903 } 1906 }
1904 1907
1905 while (bf) { 1908 while (bf) {
@@ -1913,7 +1916,7 @@ static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1913 return nbad; 1916 return nbad;
1914} 1917}
1915 1918
1916static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds, 1919static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
1917 int nbad, int txok, bool update_rc) 1920 int nbad, int txok, bool update_rc)
1918{ 1921{
1919 struct sk_buff *skb = bf->bf_mpdu; 1922 struct sk_buff *skb = bf->bf_mpdu;
@@ -1923,24 +1926,24 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
1923 u8 i, tx_rateindex; 1926 u8 i, tx_rateindex;
1924 1927
1925 if (txok) 1928 if (txok)
1926 tx_info->status.ack_signal = ds->ds_txstat.ts_rssi; 1929 tx_info->status.ack_signal = ts->ts_rssi;
1927 1930
1928 tx_rateindex = ds->ds_txstat.ts_rateindex; 1931 tx_rateindex = ts->ts_rateindex;
1929 WARN_ON(tx_rateindex >= hw->max_rates); 1932 WARN_ON(tx_rateindex >= hw->max_rates);
1930 1933
1931 if (update_rc) 1934 if (ts->ts_status & ATH9K_TXERR_FILT)
1932 tx_info->pad[0] |= ATH_TX_INFO_UPDATE_RC;
1933 if (ds->ds_txstat.ts_status & ATH9K_TXERR_FILT)
1934 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED; 1935 tx_info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1936 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc)
1937 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
1935 1938
1936 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 && 1939 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
1937 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { 1940 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
1938 if (ieee80211_is_data(hdr->frame_control)) { 1941 if (ieee80211_is_data(hdr->frame_control)) {
1939 if (ds->ds_txstat.ts_flags & 1942 if (ts->ts_flags &
1940 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN)) 1943 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN))
1941 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN; 1944 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN;
1942 if ((ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) || 1945 if ((ts->ts_status & ATH9K_TXERR_XRETRY) ||
1943 (ds->ds_txstat.ts_status & ATH9K_TXERR_FIFO)) 1946 (ts->ts_status & ATH9K_TXERR_FIFO))
1944 tx_info->pad[0] |= ATH_TX_INFO_XRETRY; 1947 tx_info->pad[0] |= ATH_TX_INFO_XRETRY;
1945 tx_info->status.ampdu_len = bf->bf_nframes; 1948 tx_info->status.ampdu_len = bf->bf_nframes;
1946 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad; 1949 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad;
@@ -1978,6 +1981,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1978 struct ath_buf *bf, *lastbf, *bf_held = NULL; 1981 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1979 struct list_head bf_head; 1982 struct list_head bf_head;
1980 struct ath_desc *ds; 1983 struct ath_desc *ds;
1984 struct ath_tx_status ts;
1981 int txok; 1985 int txok;
1982 int status; 1986 int status;
1983 1987
@@ -2017,7 +2021,8 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2017 lastbf = bf->bf_lastbf; 2021 lastbf = bf->bf_lastbf;
2018 ds = lastbf->bf_desc; 2022 ds = lastbf->bf_desc;
2019 2023
2020 status = ath9k_hw_txprocdesc(ah, ds); 2024 memset(&ts, 0, sizeof(ts));
2025 status = ath9k_hw_txprocdesc(ah, ds, &ts);
2021 if (status == -EINPROGRESS) { 2026 if (status == -EINPROGRESS) {
2022 spin_unlock_bh(&txq->axq_lock); 2027 spin_unlock_bh(&txq->axq_lock);
2023 break; 2028 break;
@@ -2028,7 +2033,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2028 * can disable RX. 2033 * can disable RX.
2029 */ 2034 */
2030 if (bf->bf_isnullfunc && 2035 if (bf->bf_isnullfunc &&
2031 (ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) { 2036 (ts.ts_status & ATH9K_TX_ACKED)) {
2032 if ((sc->ps_flags & PS_ENABLED)) 2037 if ((sc->ps_flags & PS_ENABLED))
2033 ath9k_enable_ps(sc); 2038 ath9k_enable_ps(sc);
2034 else 2039 else
@@ -2047,7 +2052,7 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2047 &txq->axq_q, lastbf->list.prev); 2052 &txq->axq_q, lastbf->list.prev);
2048 2053
2049 txq->axq_depth--; 2054 txq->axq_depth--;
2050 txok = !(ds->ds_txstat.ts_status & ATH9K_TXERR_MASK); 2055 txok = !(ts.ts_status & ATH9K_TXERR_MASK);
2051 txq->axq_tx_inprogress = false; 2056 txq->axq_tx_inprogress = false;
2052 spin_unlock_bh(&txq->axq_lock); 2057 spin_unlock_bh(&txq->axq_lock);
2053 2058
@@ -2062,16 +2067,16 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2062 * This frame is sent out as a single frame. 2067 * This frame is sent out as a single frame.
2063 * Use hardware retry status for this frame. 2068 * Use hardware retry status for this frame.
2064 */ 2069 */
2065 bf->bf_retries = ds->ds_txstat.ts_longretry; 2070 bf->bf_retries = ts.ts_longretry;
2066 if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY) 2071 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2067 bf->bf_state.bf_type |= BUF_XRETRY; 2072 bf->bf_state.bf_type |= BUF_XRETRY;
2068 ath_tx_rc_status(bf, ds, 0, txok, true); 2073 ath_tx_rc_status(bf, &ts, 0, txok, true);
2069 } 2074 }
2070 2075
2071 if (bf_isampdu(bf)) 2076 if (bf_isampdu(bf))
2072 ath_tx_complete_aggr(sc, txq, bf, &bf_head, txok); 2077 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok);
2073 else 2078 else
2074 ath_tx_complete_buf(sc, bf, txq, &bf_head, txok, 0); 2079 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
2075 2080
2076 ath_wake_mac80211_queue(sc, txq); 2081 ath_wake_mac80211_queue(sc, txq);
2077 2082
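The xmit.c hunks above all serve one refactor: TX completion status no longer lives in the DMA descriptor (ds->ds_txstat) but in a struct ath_tx_status that the caller zeroes on the stack and ath9k_hw_txprocdesc() fills in, so every completion helper now takes the status explicitly. A minimal standalone sketch of the pattern, with illustrative stand-in types rather than the driver's real ones:

    #include <string.h>

    /* Stand-ins only; the real layouts live in the ath9k headers. */
    struct tx_status { int ts_status; int ts_flags; };
    struct tx_desc   { struct tx_status ds_txstat; };  /* old home of the status */

    /* New-style procdesc: the caller owns a private snapshot, so later
     * descriptor reuse cannot clobber the status while the aggregation
     * and rate-control paths are still reading it. */
    static int procdesc(const struct tx_desc *ds, struct tx_status *ts)
    {
            *ts = ds->ds_txstat;        /* copy out once */
            return 0;                   /* -EINPROGRESS would mean "not done" */
    }

    void complete_one(struct tx_desc *ds)
    {
            struct tx_status ts;

            memset(&ts, 0, sizeof(ts)); /* mirrors ath_tx_processq() */
            procdesc(ds, &ts);
            /* ... hand &ts to the completion and rate-control helpers ... */
    }

Passing the snapshot down also lets ath_draintxq() synthesize a status (zeroed, or flagged ATH9K_TX_SW_ABORTED) for frames that never completed in hardware, which the old descriptor-resident field could not express as cleanly.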
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index 8263633c003c..873bf526e11f 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -59,6 +59,7 @@ enum ATH_DEBUG {
59 ATH_DBG_PS = 0x00000800, 59 ATH_DBG_PS = 0x00000800,
60 ATH_DBG_HWTIMER = 0x00001000, 60 ATH_DBG_HWTIMER = 0x00001000,
61 ATH_DBG_BTCOEX = 0x00002000, 61 ATH_DBG_BTCOEX = 0x00002000,
62 ATH_DBG_WMI = 0x00004000,
62 ATH_DBG_ANY = 0xffffffff 63 ATH_DBG_ANY = 0xffffffff
63}; 64};
64 65
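The new ATH_DBG_WMI entry extends a bitmask enum: every level is a distinct power of two, so levels OR together into one filter word and a new bit slots in without renumbering its neighbours. A reduced standalone model of how such a mask gates messages (the DBG_PRINT helper and mask value are made up for the sketch):

    #include <stdio.h>

    enum dbg {
            DBG_BTCOEX = 0x00002000,
            DBG_WMI    = 0x00004000,        /* the newly added bit */
            DBG_ANY    = 0xffffffff,
    };

    static unsigned int dbg_mask = DBG_WMI;

    #define DBG_PRINT(bit, msg) \
            do { if (dbg_mask & (bit)) puts(msg); } while (0)

    int main(void)
    {
            DBG_PRINT(DBG_WMI, "wmi: command queued");      /* printed  */
            DBG_PRINT(DBG_BTCOEX, "btcoex: tick");          /* filtered */
            return 0;
    }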
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
index ecc9eb01f4fa..a8f81ea09f14 100644
--- a/drivers/net/wireless/ath/hw.c
+++ b/drivers/net/wireless/ath/hw.c
@@ -19,8 +19,8 @@
19#include "ath.h" 19#include "ath.h"
20#include "reg.h" 20#include "reg.h"
21 21
22#define REG_READ common->ops->read 22#define REG_READ (common->ops->read)
23#define REG_WRITE common->ops->write 23#define REG_WRITE (common->ops->write)
24 24
25/** 25/**
26 * ath_hw_set_bssid_mask - filter out bssids we listen 26 * ath_hw_set_bssid_mask - filter out bssids we listen
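Parenthesizing REG_READ and REG_WRITE is routine macro hygiene: an object-like macro that expands to a bare expression lets operator precedence leak into the caller. The member-access chains here were unlikely to misparse, so the change is defensive, but the failure mode it guards against is easy to show with an arithmetic macro:

    #include <stdio.h>

    #define SPAN       hi - lo      /* bare expansion: precedence leaks */
    #define SPAN_SAFE  (hi - lo)    /* parenthesized, as hw.c now does  */

    int main(void)
    {
            int lo = 2, hi = 10;

            printf("%d\n", 2 * SPAN);       /* expands to 2*hi - lo = 18 */
            printf("%d\n", 2 * SPAN_SAFE);  /* 2 * (hi - lo)        = 16 */
            return 0;
    }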
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 00489c40be0c..24d59883d944 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -50,6 +50,7 @@
50 50
51#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \ 51#define ATH9K_5GHZ_ALL ATH9K_5GHZ_5150_5350, \
52 ATH9K_5GHZ_5470_5850 52 ATH9K_5GHZ_5470_5850
53
53/* This one skips what we call "mid band" */ 54/* This one skips what we call "mid band" */
54#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \ 55#define ATH9K_5GHZ_NO_MIDBAND ATH9K_5GHZ_5150_5350, \
55 ATH9K_5GHZ_5725_5850 56 ATH9K_5GHZ_5725_5850
@@ -360,7 +361,7 @@ EXPORT_SYMBOL(ath_reg_notifier_apply);
360 361
361static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg) 362static bool ath_regd_is_eeprom_valid(struct ath_regulatory *reg)
362{ 363{
363 u16 rd = ath_regd_get_eepromRD(reg); 364 u16 rd = ath_regd_get_eepromRD(reg);
364 int i; 365 int i;
365 366
366 if (rd & COUNTRY_ERD_FLAG) { 367 if (rd & COUNTRY_ERD_FLAG) {
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index b8807fb12c92..3a003e6803a5 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -104,6 +104,7 @@
104#define B43_MMIO_MACFILTER_CONTROL 0x420 104#define B43_MMIO_MACFILTER_CONTROL 0x420
105#define B43_MMIO_MACFILTER_DATA 0x422 105#define B43_MMIO_MACFILTER_DATA 0x422
106#define B43_MMIO_RCMTA_COUNT 0x43C 106#define B43_MMIO_RCMTA_COUNT 0x43C
107#define B43_MMIO_PSM_PHY_HDR 0x492
107#define B43_MMIO_RADIO_HWENABLED_LO 0x49A 108#define B43_MMIO_RADIO_HWENABLED_LO 0x49A
108#define B43_MMIO_GPIO_CONTROL 0x49C 109#define B43_MMIO_GPIO_CONTROL 0x49C
109#define B43_MMIO_GPIO_MASK 0x49E 110#define B43_MMIO_GPIO_MASK 0x49E
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 9a374ef83a22..997303bcf4ae 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -4349,11 +4349,10 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
4349 b43_set_phytxctl_defaults(dev); 4349 b43_set_phytxctl_defaults(dev);
4350 4350
4351 /* Minimum Contention Window */ 4351 /* Minimum Contention Window */
4352 if (phy->type == B43_PHYTYPE_B) { 4352 if (phy->type == B43_PHYTYPE_B)
4353 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F); 4353 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0x1F);
4354 } else { 4354 else
4355 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF); 4355 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MINCONT, 0xF);
4356 }
4357 /* Maximum Contention Window */ 4356 /* Maximum Contention Window */
4358 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF); 4357 b43_shm_write16(dev, B43_SHM_SCRATCH, B43_SHM_SC_MAXCONT, 0x3FF);
4359 4358
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 9c7cd282e46c..3d6b33775964 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -73,6 +73,22 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
73 u16 value, u8 core, bool off); 73 u16 value, u8 core, bool off);
74static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, 74static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
75 u16 value, u8 core); 75 u16 value, u8 core);
76static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel);
77
78static inline bool b43_empty_chanspec(struct b43_chanspec *chanspec)
79{
80 return !chanspec->channel && !chanspec->sideband &&
81 !chanspec->b_width && !chanspec->b_freq;
82}
83
84static inline bool b43_eq_chanspecs(struct b43_chanspec *chanspec1,
85 struct b43_chanspec *chanspec2)
86{
87 return (chanspec1->channel == chanspec2->channel &&
88 chanspec1->sideband == chanspec2->sideband &&
89 chanspec1->b_width == chanspec2->b_width &&
90 chanspec1->b_freq == chanspec2->b_freq);
91}
76 92
77void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) 93void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
78{//TODO 94{//TODO
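The two inline helpers added above replace the old scalar chanspec bookkeeping and are what the later calibration hunks test before trusting a cached result. A standalone sketch of the intended use, modeled on the restore paths further down (types and names are stand-ins):

    #include <stdbool.h>

    /* Mirrors struct b43_chanspec from phy_n.h. */
    struct chanspec { unsigned char channel, sideband, b_width, b_freq; };

    static bool chanspec_empty(const struct chanspec *c)
    {
            /* all-zero means "no calibration recorded yet" */
            return !c->channel && !c->sideband && !c->b_width && !c->b_freq;
    }

    static bool chanspec_eq(const struct chanspec *a, const struct chanspec *b)
    {
            return a->channel == b->channel && a->sideband == b->sideband &&
                   a->b_width == b->b_width && a->b_freq == b->b_freq;
    }

    /* A cached calibration is reusable only if it exists and was taken
     * on the chanspec the radio is currently tuned to. */
    bool cal_reusable(const struct chanspec *cal, const struct chanspec *cur)
    {
            return !chanspec_empty(cal) && chanspec_eq(cal, cur);
    }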
@@ -89,34 +105,44 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev,
89} 105}
90 106
91static void b43_chantab_radio_upload(struct b43_wldev *dev, 107static void b43_chantab_radio_upload(struct b43_wldev *dev,
92 const struct b43_nphy_channeltab_entry *e) 108 const struct b43_nphy_channeltab_entry_rev2 *e)
93{ 109{
94 b43_radio_write16(dev, B2055_PLL_REF, e->radio_pll_ref); 110 b43_radio_write(dev, B2055_PLL_REF, e->radio_pll_ref);
95 b43_radio_write16(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0); 111 b43_radio_write(dev, B2055_RF_PLLMOD0, e->radio_rf_pllmod0);
96 b43_radio_write16(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1); 112 b43_radio_write(dev, B2055_RF_PLLMOD1, e->radio_rf_pllmod1);
97 b43_radio_write16(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail); 113 b43_radio_write(dev, B2055_VCO_CAPTAIL, e->radio_vco_captail);
98 b43_radio_write16(dev, B2055_VCO_CAL1, e->radio_vco_cal1); 114 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
99 b43_radio_write16(dev, B2055_VCO_CAL2, e->radio_vco_cal2); 115
100 b43_radio_write16(dev, B2055_PLL_LFC1, e->radio_pll_lfc1); 116 b43_radio_write(dev, B2055_VCO_CAL1, e->radio_vco_cal1);
101 b43_radio_write16(dev, B2055_PLL_LFR1, e->radio_pll_lfr1); 117 b43_radio_write(dev, B2055_VCO_CAL2, e->radio_vco_cal2);
102 b43_radio_write16(dev, B2055_PLL_LFC2, e->radio_pll_lfc2); 118 b43_radio_write(dev, B2055_PLL_LFC1, e->radio_pll_lfc1);
103 b43_radio_write16(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf); 119 b43_radio_write(dev, B2055_PLL_LFR1, e->radio_pll_lfr1);
104 b43_radio_write16(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1); 120 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
105 b43_radio_write16(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2); 121
106 b43_radio_write16(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune); 122 b43_radio_write(dev, B2055_PLL_LFC2, e->radio_pll_lfc2);
107 b43_radio_write16(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune); 123 b43_radio_write(dev, B2055_LGBUF_CENBUF, e->radio_lgbuf_cenbuf);
108 b43_radio_write16(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1); 124 b43_radio_write(dev, B2055_LGEN_TUNE1, e->radio_lgen_tune1);
109 b43_radio_write16(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn); 125 b43_radio_write(dev, B2055_LGEN_TUNE2, e->radio_lgen_tune2);
110 b43_radio_write16(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim); 126 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
111 b43_radio_write16(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune); 127
112 b43_radio_write16(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune); 128 b43_radio_write(dev, B2055_C1_LGBUF_ATUNE, e->radio_c1_lgbuf_atune);
113 b43_radio_write16(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1); 129 b43_radio_write(dev, B2055_C1_LGBUF_GTUNE, e->radio_c1_lgbuf_gtune);
114 b43_radio_write16(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn); 130 b43_radio_write(dev, B2055_C1_RX_RFR1, e->radio_c1_rx_rfr1);
115 b43_radio_write16(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); 131 b43_radio_write(dev, B2055_C1_TX_PGAPADTN, e->radio_c1_tx_pgapadtn);
132 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
133
134 b43_radio_write(dev, B2055_C1_TX_MXBGTRIM, e->radio_c1_tx_mxbgtrim);
135 b43_radio_write(dev, B2055_C2_LGBUF_ATUNE, e->radio_c2_lgbuf_atune);
136 b43_radio_write(dev, B2055_C2_LGBUF_GTUNE, e->radio_c2_lgbuf_gtune);
137 b43_radio_write(dev, B2055_C2_RX_RFR1, e->radio_c2_rx_rfr1);
138 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
139
140 b43_radio_write(dev, B2055_C2_TX_PGAPADTN, e->radio_c2_tx_pgapadtn);
141 b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
116} 142}
117 143
118static void b43_chantab_phy_upload(struct b43_wldev *dev, 144static void b43_chantab_phy_upload(struct b43_wldev *dev,
119 const struct b43_nphy_channeltab_entry *e) 145 const struct b43_phy_n_sfo_cfg *e)
120{ 146{
121 b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a); 147 b43_phy_write(dev, B43_NPHY_BW1A, e->phy_bw1a);
122 b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2); 148 b43_phy_write(dev, B43_NPHY_BW2, e->phy_bw2);
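The rewritten upload routine interleaves b43_read32(dev, B43_MMIO_MACCTL) calls purely for their side effect: on a posted bus, a read back from the device forces earlier writes out of the post buffer before the sequence continues. A generic sketch of the idiom, assuming ordinary Linux MMIO accessors and an illustrative register block:

    #include <linux/io.h>
    #include <linux/types.h>

    static void upload_batch(void __iomem *regs, const u16 *vals, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    iowrite16(vals[i], regs + 2 * i);
                    if ((i % 4) == 3)
                            (void)ioread32(regs);   /* drain posted writes */
            }
            (void)ioread32(regs);                   /* final flush */
    }

Flushing every few writes, as the hunk does, keeps the device's view of the programming order close to the driver's without paying a read back per register.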
@@ -131,34 +157,20 @@ static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
131 //TODO 157 //TODO
132} 158}
133 159
134/* Tune the hardware to a new channel. */
135static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
136{
137 const struct b43_nphy_channeltab_entry *tabent;
138 160
139 tabent = b43_nphy_get_chantabent(dev, channel); 161/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2055Setup */
140 if (!tabent) 162static void b43_radio_2055_setup(struct b43_wldev *dev,
141 return -ESRCH; 163 const struct b43_nphy_channeltab_entry_rev2 *e)
164{
165 B43_WARN_ON(dev->phy.rev >= 3);
142 166
143 //FIXME enable/disable band select upper20 in RXCTL 167 b43_chantab_radio_upload(dev, e);
144 if (0 /*FIXME 5Ghz*/)
145 b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x20);
146 else
147 b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, 0x50);
148 b43_chantab_radio_upload(dev, tabent);
149 udelay(50); 168 udelay(50);
150 b43_radio_write16(dev, B2055_VCO_CAL10, 5); 169 b43_radio_write(dev, B2055_VCO_CAL10, 0x05);
151 b43_radio_write16(dev, B2055_VCO_CAL10, 45); 170 b43_radio_write(dev, B2055_VCO_CAL10, 0x45);
152 b43_radio_write16(dev, B2055_VCO_CAL10, 65); 171 b43_read32(dev, B43_MMIO_MACCTL); /* flush writes */
172 b43_radio_write(dev, B2055_VCO_CAL10, 0x65);
153 udelay(300); 173 udelay(300);
154 if (0 /*FIXME 5Ghz*/)
155 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
156 else
157 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
158 b43_chantab_phy_upload(dev, tabent);
159 b43_nphy_tx_power_fix(dev);
160
161 return 0;
162} 174}
163 175
164static void b43_radio_init2055_pre(struct b43_wldev *dev) 176static void b43_radio_init2055_pre(struct b43_wldev *dev)
@@ -174,52 +186,64 @@ static void b43_radio_init2055_pre(struct b43_wldev *dev)
174 186
175static void b43_radio_init2055_post(struct b43_wldev *dev) 187static void b43_radio_init2055_post(struct b43_wldev *dev)
176{ 188{
189 struct b43_phy_n *nphy = dev->phy.n;
177 struct ssb_sprom *sprom = &(dev->dev->bus->sprom); 190 struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
178 struct ssb_boardinfo *binfo = &(dev->dev->bus->boardinfo); 191 struct ssb_boardinfo *binfo = &(dev->dev->bus->boardinfo);
179 int i; 192 int i;
180 u16 val; 193 u16 val;
194 bool workaround = false;
195
196 if (sprom->revision < 4)
197 workaround = (binfo->vendor != PCI_VENDOR_ID_BROADCOM ||
198 binfo->type != 0x46D ||
199 binfo->rev < 0x41);
200 else
201 workaround = ((sprom->boardflags_hi & B43_BFH_NOPA) == 0);
181 202
182 b43_radio_mask(dev, B2055_MASTER1, 0xFFF3); 203 b43_radio_mask(dev, B2055_MASTER1, 0xFFF3);
183 msleep(1); 204 if (workaround) {
184 if ((sprom->revision != 4) || 205 b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
185 !(sprom->boardflags_hi & B43_BFH_RSSIINV)) { 206 b43_radio_mask(dev, B2055_C2_RX_BB_REG, 0x7F);
186 if ((binfo->vendor != PCI_VENDOR_ID_BROADCOM) ||
187 (binfo->type != 0x46D) ||
188 (binfo->rev < 0x41)) {
189 b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
190 b43_radio_mask(dev, B2055_C1_RX_BB_REG, 0x7F);
191 msleep(1);
192 }
193 } 207 }
194 b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0x3F, 0x2C); 208 b43_radio_maskset(dev, B2055_RRCCAL_NOPTSEL, 0xFFC0, 0x2C);
195 msleep(1); 209 b43_radio_write(dev, B2055_CAL_MISC, 0x3C);
196 b43_radio_write16(dev, B2055_CAL_MISC, 0x3C);
197 msleep(1);
198 b43_radio_mask(dev, B2055_CAL_MISC, 0xFFBE); 210 b43_radio_mask(dev, B2055_CAL_MISC, 0xFFBE);
199 msleep(1);
200 b43_radio_set(dev, B2055_CAL_LPOCTL, 0x80); 211 b43_radio_set(dev, B2055_CAL_LPOCTL, 0x80);
201 msleep(1);
202 b43_radio_set(dev, B2055_CAL_MISC, 0x1); 212 b43_radio_set(dev, B2055_CAL_MISC, 0x1);
203 msleep(1); 213 msleep(1);
204 b43_radio_set(dev, B2055_CAL_MISC, 0x40); 214 b43_radio_set(dev, B2055_CAL_MISC, 0x40);
205 msleep(1); 215 for (i = 0; i < 200; i++) {
206 for (i = 0; i < 100; i++) { 216 val = b43_radio_read(dev, B2055_CAL_COUT2);
207 val = b43_radio_read16(dev, B2055_CAL_COUT2); 217 if (val & 0x80) {
208 if (val & 0x80) 218 i = 0;
209 break; 219 break;
220 }
210 udelay(10); 221 udelay(10);
211 } 222 }
212 msleep(1); 223 if (i)
224 b43err(dev->wl, "radio post init timeout\n");
213 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F); 225 b43_radio_mask(dev, B2055_CAL_LPOCTL, 0xFF7F);
214 msleep(1);
215 nphy_channel_switch(dev, dev->phy.channel); 226 nphy_channel_switch(dev, dev->phy.channel);
216 b43_radio_write16(dev, B2055_C1_RX_BB_LPF, 0x9); 227 b43_radio_write(dev, B2055_C1_RX_BB_LPF, 0x9);
217 b43_radio_write16(dev, B2055_C2_RX_BB_LPF, 0x9); 228 b43_radio_write(dev, B2055_C2_RX_BB_LPF, 0x9);
218 b43_radio_write16(dev, B2055_C1_RX_BB_MIDACHP, 0x83); 229 b43_radio_write(dev, B2055_C1_RX_BB_MIDACHP, 0x83);
219 b43_radio_write16(dev, B2055_C2_RX_BB_MIDACHP, 0x83); 230 b43_radio_write(dev, B2055_C2_RX_BB_MIDACHP, 0x83);
231 b43_radio_maskset(dev, B2055_C1_LNA_GAINBST, 0xFFF8, 0x6);
232 b43_radio_maskset(dev, B2055_C2_LNA_GAINBST, 0xFFF8, 0x6);
233 if (!nphy->gain_boost) {
234 b43_radio_set(dev, B2055_C1_RX_RFSPC1, 0x2);
235 b43_radio_set(dev, B2055_C2_RX_RFSPC1, 0x2);
236 } else {
237 b43_radio_mask(dev, B2055_C1_RX_RFSPC1, 0xFFFD);
238 b43_radio_mask(dev, B2055_C2_RX_RFSPC1, 0xFFFD);
239 }
240 udelay(2);
220} 241}
221 242
222/* Initialize a Broadcom 2055 N-radio */ 243/*
244 * Initialize a Broadcom 2055 N-radio
245 * http://bcm-v4.sipsolutions.net/802.11/Radio/2055/Init
246 */
223static void b43_radio_init2055(struct b43_wldev *dev) 247static void b43_radio_init2055(struct b43_wldev *dev)
224{ 248{
225 b43_radio_init2055_pre(dev); 249 b43_radio_init2055_pre(dev);
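The post-init path stops sleeping between every register poke and instead polls B2055_CAL_COUT2 for the done bit with a bounded loop, using i = 0 before break as a success sentinel so a single if (i) afterwards distinguishes timeout from completion. The shape of the loop, with stand-ins for the driver's accessors:

    /* In init2055_post the real calls are b43_radio_read(dev,
     * B2055_CAL_COUT2), udelay(10) and b43err(). */
    extern unsigned int read_cal_cout2(void);
    extern void delay_10us(void);
    extern void report_timeout(void);

    int wait_cal_done(void)
    {
            int i;

            for (i = 0; i < 200; i++) {
                    if (read_cal_cout2() & 0x80) {
                            i = 0;          /* success sentinel */
                            break;
                    }
                    delay_10us();
            }
            if (i) {                        /* nonzero only on timeout */
                    report_timeout();
                    return -1;
            }
            return 0;
    }

The sentinel works because the loop can only exit through the break with i forced to zero; falling off the end leaves i at the limit.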
@@ -230,16 +254,15 @@ static void b43_radio_init2055(struct b43_wldev *dev)
230 b43_radio_init2055_post(dev); 254 b43_radio_init2055_post(dev);
231} 255}
232 256
233void b43_nphy_radio_turn_on(struct b43_wldev *dev) 257/*
258 * Initialize a Broadcom 2056 N-radio
259 * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
260 */
261static void b43_radio_init2056(struct b43_wldev *dev)
234{ 262{
235 b43_radio_init2055(dev); 263 /* TODO */
236} 264}
237 265
238void b43_nphy_radio_turn_off(struct b43_wldev *dev)
239{
240 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
241 ~B43_NPHY_RFCTL_CMD_EN);
242}
243 266
244/* 267/*
245 * Upload the N-PHY tables. 268 * Upload the N-PHY tables.
@@ -647,6 +670,41 @@ static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
647 clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES); 670 clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
648} 671}
649 672
673/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SuperSwitchInit */
674static void b43_nphy_superswitch_init(struct b43_wldev *dev, bool init)
675{
676 if (dev->phy.rev >= 3) {
677 if (!init)
678 return;
679 if (0 /* FIXME */) {
680 b43_ntab_write(dev, B43_NTAB16(9, 2), 0x211);
681 b43_ntab_write(dev, B43_NTAB16(9, 3), 0x222);
682 b43_ntab_write(dev, B43_NTAB16(9, 8), 0x144);
683 b43_ntab_write(dev, B43_NTAB16(9, 12), 0x188);
684 }
685 } else {
686 b43_phy_write(dev, B43_NPHY_GPIO_LOOEN, 0);
687 b43_phy_write(dev, B43_NPHY_GPIO_HIOEN, 0);
688
689 ssb_chipco_gpio_control(&dev->dev->bus->chipco, 0xFC00,
690 0xFC00);
691 b43_write32(dev, B43_MMIO_MACCTL,
692 b43_read32(dev, B43_MMIO_MACCTL) &
693 ~B43_MACCTL_GPOUTSMSK);
694 b43_write16(dev, B43_MMIO_GPIO_MASK,
695 b43_read16(dev, B43_MMIO_GPIO_MASK) | 0xFC00);
696 b43_write16(dev, B43_MMIO_GPIO_CONTROL,
697 b43_read16(dev, B43_MMIO_GPIO_CONTROL) & ~0xFC00);
698
699 if (init) {
700 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
701 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
702 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
703 b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
704 }
705 }
706}
707
650/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */ 708/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
651static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val) 709static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
652{ 710{
@@ -723,7 +781,7 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
723{ 781{
724 struct b43_phy_n *nphy = dev->phy.n; 782 struct b43_phy_n *nphy = dev->phy.n;
725 783
726 unsigned int channel; 784 u8 channel = nphy->radio_chanspec.channel;
727 int tone[2] = { 57, 58 }; 785 int tone[2] = { 57, 58 };
728 u32 noise[2] = { 0x3FF, 0x3FF }; 786 u32 noise[2] = { 0x3FF, 0x3FF };
729 787
@@ -732,8 +790,6 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
732 if (nphy->hang_avoid) 790 if (nphy->hang_avoid)
733 b43_nphy_stay_in_carrier_search(dev, 1); 791 b43_nphy_stay_in_carrier_search(dev, 1);
734 792
735 /* FIXME: channel = radio_chanspec */
736
737 if (nphy->gband_spurwar_en) { 793 if (nphy->gband_spurwar_en) {
738 /* TODO: N PHY Adjust Analog Pfbw (7) */ 794 /* TODO: N PHY Adjust Analog Pfbw (7) */
739 if (channel == 11 && dev->phy.is_40mhz) 795 if (channel == 11 && dev->phy.is_40mhz)
@@ -779,6 +835,62 @@ static void b43_nphy_spur_workaround(struct b43_wldev *dev)
779 b43_nphy_stay_in_carrier_search(dev, 0); 835 b43_nphy_stay_in_carrier_search(dev, 0);
780} 836}
781 837
838/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/AdjustLnaGainTbl */
839static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
840{
841 struct b43_phy_n *nphy = dev->phy.n;
842
843 u8 i;
844 s16 tmp;
845 u16 data[4];
846 s16 gain[2];
847 u16 minmax[2];
848 u16 lna_gain[4] = { -2, 10, 19, 25 };
849
850 if (nphy->hang_avoid)
851 b43_nphy_stay_in_carrier_search(dev, 1);
852
853 if (nphy->gain_boost) {
854 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
855 gain[0] = 6;
856 gain[1] = 6;
857 } else {
858 tmp = 40370 - 315 * nphy->radio_chanspec.channel;
859 gain[0] = ((tmp >> 13) + ((tmp >> 12) & 1));
860 tmp = 23242 - 224 * nphy->radio_chanspec.channel;
861 gain[1] = ((tmp >> 13) + ((tmp >> 12) & 1));
862 }
863 } else {
864 gain[0] = 0;
865 gain[1] = 0;
866 }
867
868 for (i = 0; i < 2; i++) {
869 if (nphy->elna_gain_config) {
870 data[0] = 19 + gain[i];
871 data[1] = 25 + gain[i];
872 data[2] = 25 + gain[i];
873 data[3] = 25 + gain[i];
874 } else {
875 data[0] = lna_gain[0] + gain[i];
876 data[1] = lna_gain[1] + gain[i];
877 data[2] = lna_gain[2] + gain[i];
878 data[3] = lna_gain[3] + gain[i];
879 }
880 b43_ntab_write_bulk(dev, B43_NTAB16(10, 8), 4, data);
881
882 minmax[i] = 23 + gain[i];
883 }
884
885 b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN, ~B43_NPHY_C1_MINGAIN,
886 minmax[0] << B43_NPHY_C1_MINGAIN_SHIFT);
887 b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN, ~B43_NPHY_C2_MINGAIN,
888 minmax[1] << B43_NPHY_C2_MINGAIN_SHIFT);
889
890 if (nphy->hang_avoid)
891 b43_nphy_stay_in_carrier_search(dev, 0);
892}
893
782/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */ 894/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
783static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev) 895static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
784{ 896{
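The 5 GHz branch of the new LNA-gain function computes gains with (tmp >> 13) + ((tmp >> 12) & 1): bit 12 sits at the 0.5 position of a divide-by-8192, so adding it back rounds to nearest instead of truncating. A standalone check of the trick (channel numbers picked for illustration; the driver's operands stay positive, so the arithmetic shift is safe):

    #include <stdio.h>

    static int round_shift13(int tmp)
    {
            return (tmp >> 13) + ((tmp >> 12) & 1);
    }

    int main(void)
    {
            /* channel 36: 40370 - 315*36 = 29030, 29030/8192 = 3.54 */
            printf("%d\n", round_shift13(40370 - 315 * 36));    /* 4 */
            /* channel 64: 40370 - 315*64 = 20210, 20210/8192 = 2.47 */
            printf("%d\n", round_shift13(40370 - 315 * 64));    /* 2 */
            return 0;
    }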
@@ -863,7 +975,7 @@ static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
863 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 975 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
864 (code << 8 | 0x7C)); 976 (code << 8 | 0x7C));
865 977
866 /* TODO: b43_nphy_adjust_lna_gain_table(dev); */ 978 b43_nphy_adjust_lna_gain_table(dev);
867 979
868 if (nphy->elna_gain_config) { 980 if (nphy->elna_gain_config) {
869 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808); 981 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
@@ -1970,12 +2082,12 @@ static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
1970 u16 *rssical_phy_regs = NULL; 2082 u16 *rssical_phy_regs = NULL;
1971 2083
1972 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 2084 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
1973 if (!nphy->rssical_chanspec_2G) 2085 if (b43_empty_chanspec(&nphy->rssical_chanspec_2G))
1974 return; 2086 return;
1975 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G; 2087 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
1976 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G; 2088 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
1977 } else { 2089 } else {
1978 if (!nphy->rssical_chanspec_5G) 2090 if (b43_empty_chanspec(&nphy->rssical_chanspec_5G))
1979 return; 2091 return;
1980 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G; 2092 rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
1981 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G; 2093 rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
@@ -2395,7 +2507,7 @@ static void b43_nphy_save_cal(struct b43_wldev *dev)
2395 2507
2396 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL; 2508 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
2397 u16 *txcal_radio_regs = NULL; 2509 u16 *txcal_radio_regs = NULL;
2398 u8 *iqcal_chanspec; 2510 struct b43_chanspec *iqcal_chanspec;
2399 u16 *table = NULL; 2511 u16 *table = NULL;
2400 2512
2401 if (nphy->hang_avoid) 2513 if (nphy->hang_avoid)
@@ -2451,12 +2563,12 @@ static void b43_nphy_restore_cal(struct b43_wldev *dev)
2451 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL; 2563 struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
2452 2564
2453 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) { 2565 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
2454 if (nphy->iqcal_chanspec_2G == 0) 2566 if (b43_empty_chanspec(&nphy->iqcal_chanspec_2G))
2455 return; 2567 return;
2456 table = nphy->cal_cache.txcal_coeffs_2G; 2568 table = nphy->cal_cache.txcal_coeffs_2G;
2457 loft = &nphy->cal_cache.txcal_coeffs_2G[5]; 2569 loft = &nphy->cal_cache.txcal_coeffs_2G[5];
2458 } else { 2570 } else {
2459 if (nphy->iqcal_chanspec_5G == 0) 2571 if (b43_empty_chanspec(&nphy->iqcal_chanspec_5G))
2460 return; 2572 return;
2461 table = nphy->cal_cache.txcal_coeffs_5G; 2573 table = nphy->cal_cache.txcal_coeffs_5G;
2462 loft = &nphy->cal_cache.txcal_coeffs_5G[5]; 2574 loft = &nphy->cal_cache.txcal_coeffs_5G[5];
@@ -2689,7 +2801,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
2689 } 2801 }
2690 b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, 2802 b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
2691 buffer); 2803 buffer);
2692 b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2, 2804 b43_ntab_read_bulk(dev, B43_NTAB16(15, 101), 2,
2693 buffer); 2805 buffer);
2694 b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, 2806 b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
2695 buffer); 2807 buffer);
@@ -2701,8 +2813,7 @@ static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
2701 b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length, 2813 b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
2702 nphy->txiqlocal_bestc); 2814 nphy->txiqlocal_bestc);
2703 nphy->txiqlocal_coeffsvalid = true; 2815 nphy->txiqlocal_coeffsvalid = true;
2704 /* TODO: Set nphy->txiqlocal_chanspec to 2816 nphy->txiqlocal_chanspec = nphy->radio_chanspec;
2705 the current channel */
2706 } else { 2817 } else {
2707 length = 11; 2818 length = 11;
2708 if (dev->phy.rev < 3) 2819 if (dev->phy.rev < 3)
@@ -2737,7 +2848,8 @@ static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
2737 u16 buffer[7]; 2848 u16 buffer[7];
2738 bool equal = true; 2849 bool equal = true;
2739 2850
2740 if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */) 2851 if (!nphy->txiqlocal_coeffsvalid ||
2852 b43_eq_chanspecs(&nphy->txiqlocal_chanspec, &nphy->radio_chanspec))
2741 return; 2853 return;
2742 2854
2743 b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer); 2855 b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
@@ -3092,9 +3204,11 @@ int b43_phy_initn(struct b43_wldev *dev)
3092 do_rssi_cal = false; 3204 do_rssi_cal = false;
3093 if (phy->rev >= 3) { 3205 if (phy->rev >= 3) {
3094 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 3206 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
3095 do_rssi_cal = (nphy->rssical_chanspec_2G == 0); 3207 do_rssi_cal =
3208 b43_empty_chanspec(&nphy->rssical_chanspec_2G);
3096 else 3209 else
3097 do_rssi_cal = (nphy->rssical_chanspec_5G == 0); 3210 do_rssi_cal =
3211 b43_empty_chanspec(&nphy->rssical_chanspec_5G);
3098 3212
3099 if (do_rssi_cal) 3213 if (do_rssi_cal)
3100 b43_nphy_rssi_cal(dev); 3214 b43_nphy_rssi_cal(dev);
@@ -3106,9 +3220,9 @@ int b43_phy_initn(struct b43_wldev *dev)
3106 3220
3107 if (!((nphy->measure_hold & 0x6) != 0)) { 3221 if (!((nphy->measure_hold & 0x6) != 0)) {
3108 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) 3222 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
3109 do_cal = (nphy->iqcal_chanspec_2G == 0); 3223 do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_2G);
3110 else 3224 else
3111 do_cal = (nphy->iqcal_chanspec_5G == 0); 3225 do_cal = b43_empty_chanspec(&nphy->iqcal_chanspec_5G);
3112 3226
3113 if (nphy->mute) 3227 if (nphy->mute)
3114 do_cal = false; 3228 do_cal = false;
@@ -3117,7 +3231,7 @@ int b43_phy_initn(struct b43_wldev *dev)
3117 target = b43_nphy_get_tx_gains(dev); 3231 target = b43_nphy_get_tx_gains(dev);
3118 3232
3119 if (nphy->antsel_type == 2) 3233 if (nphy->antsel_type == 2)
3120 ;/*TODO NPHY Superswitch Init with argument 1*/ 3234 b43_nphy_superswitch_init(dev, true);
3121 if (nphy->perical != 2) { 3235 if (nphy->perical != 2) {
3122 b43_nphy_rssi_cal(dev); 3236 b43_nphy_rssi_cal(dev);
3123 if (phy->rev >= 3) { 3237 if (phy->rev >= 3) {
@@ -3155,6 +3269,133 @@ int b43_phy_initn(struct b43_wldev *dev)
3155 return 0; 3269 return 0;
3156} 3270}
3157 3271
3272/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ChanspecSetup */
3273static void b43_nphy_chanspec_setup(struct b43_wldev *dev,
3274 const struct b43_phy_n_sfo_cfg *e,
3275 struct b43_chanspec chanspec)
3276{
3277 struct b43_phy *phy = &dev->phy;
3278 struct b43_phy_n *nphy = dev->phy.n;
3279
3280 u16 tmp;
3281 u32 tmp32;
3282
3283 tmp = b43_phy_read(dev, B43_NPHY_BANDCTL) & B43_NPHY_BANDCTL_5GHZ;
3284 if (chanspec.b_freq == 1 && tmp == 0) {
3285 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
3286 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
3287 b43_phy_set(dev, B43_PHY_B_BBCFG, 0xC000);
3288 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
3289 b43_phy_set(dev, B43_NPHY_BANDCTL, B43_NPHY_BANDCTL_5GHZ);
3290 } else if (chanspec.b_freq == 1) {
3291 b43_phy_mask(dev, B43_NPHY_BANDCTL, ~B43_NPHY_BANDCTL_5GHZ);
3292 tmp32 = b43_read32(dev, B43_MMIO_PSM_PHY_HDR);
3293 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32 | 4);
3294 b43_phy_mask(dev, B43_PHY_B_BBCFG, (u16)~0xC000);
3295 b43_write32(dev, B43_MMIO_PSM_PHY_HDR, tmp32);
3296 }
3297
3298 b43_chantab_phy_upload(dev, e);
3299
3300 tmp = chanspec.channel;
3301 if (chanspec.b_freq == 1)
3302 tmp |= 0x0100;
3303 if (chanspec.b_width == 3)
3304 tmp |= 0x0200;
3305 b43_shm_write16(dev, B43_SHM_SHARED, 0xA0, tmp);
3306
3307 if (nphy->radio_chanspec.channel == 14) {
3308 b43_nphy_classifier(dev, 2, 0);
3309 b43_phy_set(dev, B43_PHY_B_TEST, 0x0800);
3310 } else {
3311 b43_nphy_classifier(dev, 2, 2);
3312 if (chanspec.b_freq == 2)
3313 b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
3314 }
3315
3316 if (nphy->txpwrctrl)
3317 b43_nphy_tx_power_fix(dev);
3318
3319 if (dev->phy.rev < 3)
3320 b43_nphy_adjust_lna_gain_table(dev);
3321
3322 b43_nphy_tx_lp_fbw(dev);
3323
3324 if (dev->phy.rev >= 3 && 0) {
3325 /* TODO */
3326 }
3327
3328 b43_phy_write(dev, B43_NPHY_NDATAT_DUP40, 0x3830);
3329
3330 if (phy->rev >= 3)
3331 b43_nphy_spur_workaround(dev);
3332}
3333
3334/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetChanspec */
3335static int b43_nphy_set_chanspec(struct b43_wldev *dev,
3336 struct b43_chanspec chanspec)
3337{
3338 struct b43_phy_n *nphy = dev->phy.n;
3339
3340 const struct b43_nphy_channeltab_entry_rev2 *tabent_r2;
3341 const struct b43_nphy_channeltab_entry_rev3 *tabent_r3;
3342
3343 u8 tmp;
3344 u8 channel = chanspec.channel;
3345
3346 if (dev->phy.rev >= 3) {
3347 /* TODO */
3348 tabent_r3 = NULL;
3349 if (!tabent_r3)
3350 return -ESRCH;
3351 } else {
3352 tabent_r2 = b43_nphy_get_chantabent_rev2(dev, channel);
3353 if (!tabent_r2)
3354 return -ESRCH;
3355 }
3356
3357 nphy->radio_chanspec = chanspec;
3358
3359 if (chanspec.b_width != nphy->b_width)
3360 ; /* TODO: BMAC BW Set (chanspec.b_width) */
3361
3362 /* TODO: use defines */
3363 if (chanspec.b_width == 3) {
3364 if (chanspec.sideband == 2)
3365 b43_phy_set(dev, B43_NPHY_RXCTL,
3366 B43_NPHY_RXCTL_BSELU20);
3367 else
3368 b43_phy_mask(dev, B43_NPHY_RXCTL,
3369 ~B43_NPHY_RXCTL_BSELU20);
3370 }
3371
3372 if (dev->phy.rev >= 3) {
3373 tmp = (chanspec.b_freq == 1) ? 4 : 0;
3374 b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
3375 /* TODO: PHY Radio2056 Setup (dev, tabent_r3); */
3376 b43_nphy_chanspec_setup(dev, &(tabent_r3->phy_regs), chanspec);
3377 } else {
3378 tmp = (chanspec.b_freq == 1) ? 0x0020 : 0x0050;
3379 b43_radio_maskset(dev, B2055_MASTER1, 0xFF8F, tmp);
3380 b43_radio_2055_setup(dev, tabent_r2);
3381 b43_nphy_chanspec_setup(dev, &(tabent_r2->phy_regs), chanspec);
3382 }
3383
3384 return 0;
3385}
3386
3387/* Tune the hardware to a new channel */
3388static int nphy_channel_switch(struct b43_wldev *dev, unsigned int channel)
3389{
3390 struct b43_phy_n *nphy = dev->phy.n;
3391
3392 struct b43_chanspec chanspec;
3393 chanspec = nphy->radio_chanspec;
3394 chanspec.channel = channel;
3395
3396 return b43_nphy_set_chanspec(dev, chanspec);
3397}
3398
3158static int b43_nphy_op_allocate(struct b43_wldev *dev) 3399static int b43_nphy_op_allocate(struct b43_wldev *dev)
3159{ 3400{
3160 struct b43_phy_n *nphy; 3401 struct b43_phy_n *nphy;
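The insertion above completes the chanspec conversion's call chain: nphy_channel_switch() copies the current chanspec, overrides only the channel, and delegates to b43_nphy_set_chanspec(), which resolves the per-revision table entry, programs the radio (2055 now, 2056 stubbed for rev >= 3) and finishes with the PHY-side b43_nphy_chanspec_setup(). A condensed model of that flow, with stand-ins for the lookups and programming helpers:

    struct chanspec { unsigned char channel, sideband, b_width, b_freq; };

    extern const void *lookup_chantab(int phy_rev, unsigned char channel);
    extern void radio_setup(int phy_rev, const void *tabent);
    extern void phy_setup(const void *tabent, struct chanspec cs);

    static struct chanspec cur;             /* nphy->radio_chanspec */

    int set_chanspec(int phy_rev, struct chanspec cs)
    {
            const void *tabent = lookup_chantab(phy_rev, cs.channel);

            if (!tabent)
                    return -1;              /* -ESRCH in the driver */

            cur = cs;                       /* remember what we tuned to */
            radio_setup(phy_rev, tabent);   /* 2055, or 2056 on rev >= 3 */
            phy_setup(tabent, cs);          /* b43_nphy_chanspec_setup() */
            return 0;
    }

    int channel_switch(int phy_rev, unsigned int channel)
    {
            struct chanspec cs = cur;       /* keep band, width, sideband */

            cs.channel = (unsigned char)channel;
            return set_chanspec(phy_rev, cs);
    }

Keeping the band, width and sideband from the live chanspec is what lets the old unsigned-channel entry point survive unchanged for callers such as init2055_post.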
@@ -3243,9 +3484,43 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
3243 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); 3484 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
3244} 3485}
3245 3486
3487/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
3246static void b43_nphy_op_software_rfkill(struct b43_wldev *dev, 3488static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
3247 bool blocked) 3489 bool blocked)
3248{//TODO 3490{
3491 struct b43_phy_n *nphy = dev->phy.n;
3492
3493 if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
3494 b43err(dev->wl, "MAC not suspended\n");
3495
3496 if (blocked) {
3497 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
3498 ~B43_NPHY_RFCTL_CMD_CHIP0PU);
3499 if (dev->phy.rev >= 3) {
3500 b43_radio_mask(dev, 0x09, ~0x2);
3501
3502 b43_radio_write(dev, 0x204D, 0);
3503 b43_radio_write(dev, 0x2053, 0);
3504 b43_radio_write(dev, 0x2058, 0);
3505 b43_radio_write(dev, 0x205E, 0);
3506 b43_radio_mask(dev, 0x2062, ~0xF0);
3507 b43_radio_write(dev, 0x2064, 0);
3508
3509 b43_radio_write(dev, 0x304D, 0);
3510 b43_radio_write(dev, 0x3053, 0);
3511 b43_radio_write(dev, 0x3058, 0);
3512 b43_radio_write(dev, 0x305E, 0);
3513 b43_radio_mask(dev, 0x3062, ~0xF0);
3514 b43_radio_write(dev, 0x3064, 0);
3515 }
3516 } else {
3517 if (dev->phy.rev >= 3) {
3518 b43_radio_init2056(dev);
3519 b43_nphy_set_chanspec(dev, nphy->radio_chanspec);
3520 } else {
3521 b43_radio_init2055(dev);
3522 }
3523 }
3249} 3524}
3250 3525
3251static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on) 3526static void b43_nphy_op_switch_analog(struct b43_wldev *dev, bool on)
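The filled-in rfkill callback is symmetric: sanity-check that the MAC is already quiescent, then either power the RF chains down or bring the radio back up and retune to the remembered chanspec. Its branch structure, reduced to stand-ins:

    #include <stdbool.h>

    extern bool mac_enabled(void);
    extern void warn_mac_not_suspended(void);
    extern void rf_power_down(void);        /* clear CHIP0PU, zero the chains */
    extern void radio_up_and_retune(void);  /* init 2055/2056, set chanspec   */

    void software_rfkill(bool blocked)
    {
            if (mac_enabled())
                    warn_mac_not_suspended();   /* caller must idle first */

            if (blocked)
                    rf_power_down();
            else
                    radio_up_and_retune();
    }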
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 403aad3f894f..8b6d570dd0aa 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -711,6 +711,8 @@
711#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */ 711#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
712#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */ 712#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
713 713
714#define B43_PHY_B_BBCFG B43_PHY_N_BMODE(0x001) /* BB config */
715#define B43_PHY_B_TEST B43_PHY_N_BMODE(0x00A)
714 716
715 717
716/* Broadcom 2055 radio registers */ 718/* Broadcom 2055 radio registers */
@@ -924,6 +926,13 @@
924 926
925struct b43_wldev; 927struct b43_wldev;
926 928
929struct b43_chanspec {
930 u8 channel;
931 u8 sideband;
932 u8 b_width;
933 u8 b_freq;
934};
935
927struct b43_phy_n_iq_comp { 936struct b43_phy_n_iq_comp {
928 s16 a0; 937 s16 a0;
929 s16 b0; 938 s16 b0;
@@ -975,7 +984,8 @@ struct b43_phy_n {
975 u16 papd_epsilon_offset[2]; 984 u16 papd_epsilon_offset[2];
976 s32 preamble_override; 985 s32 preamble_override;
977 u32 bb_mult_save; 986 u32 bb_mult_save;
978 u16 radio_chanspec; 987 u8 b_width;
988 struct b43_chanspec radio_chanspec;
979 989
980 bool gain_boost; 990 bool gain_boost;
981 bool elna_gain_config; 991 bool elna_gain_config;
@@ -991,6 +1001,7 @@ struct b43_phy_n {
991 u16 txiqlocal_bestc[11]; 1001 u16 txiqlocal_bestc[11];
992 bool txiqlocal_coeffsvalid; 1002 bool txiqlocal_coeffsvalid;
993 struct b43_phy_n_txpwrindex txpwrindex[2]; 1003 struct b43_phy_n_txpwrindex txpwrindex[2];
1004 struct b43_chanspec txiqlocal_chanspec;
994 1005
995 u8 txrx_chain; 1006 u8 txrx_chain;
996 u16 tx_rx_cal_phy_saveregs[11]; 1007 u16 tx_rx_cal_phy_saveregs[11];
@@ -1006,12 +1017,12 @@ struct b43_phy_n {
1006 bool gband_spurwar_en; 1017 bool gband_spurwar_en;
1007 1018
1008 bool ipa2g_on; 1019 bool ipa2g_on;
1009 u8 iqcal_chanspec_2G; 1020 struct b43_chanspec iqcal_chanspec_2G;
1010 u8 rssical_chanspec_2G; 1021 struct b43_chanspec rssical_chanspec_2G;
1011 1022
1012 bool ipa5g_on; 1023 bool ipa5g_on;
1013 u8 iqcal_chanspec_5G; 1024 struct b43_chanspec iqcal_chanspec_5G;
1014 u8 rssical_chanspec_5G; 1025 struct b43_chanspec rssical_chanspec_5G;
1015 1026
1016 struct b43_phy_n_rssical_cache rssical_cache; 1027 struct b43_phy_n_rssical_cache rssical_cache;
1017 struct b43_phy_n_cal_cache cal_cache; 1028 struct b43_phy_n_cal_cache cal_cache;
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index a00d509150f7..d96e870ab8fe 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -318,14 +318,14 @@ void b2055_upload_inittab(struct b43_wldev *dev,
318 .radio_c2_tx_mxbgtrim = r21 318 .radio_c2_tx_mxbgtrim = r21
319 319
320#define PHYREGS(r0, r1, r2, r3, r4, r5) \ 320#define PHYREGS(r0, r1, r2, r3, r4, r5) \
321 .phy_bw1a = r0, \ 321 .phy_regs.phy_bw1a = r0, \
322 .phy_bw2 = r1, \ 322 .phy_regs.phy_bw2 = r1, \
323 .phy_bw3 = r2, \ 323 .phy_regs.phy_bw3 = r2, \
324 .phy_bw4 = r3, \ 324 .phy_regs.phy_bw4 = r3, \
325 .phy_bw5 = r4, \ 325 .phy_regs.phy_bw5 = r4, \
326 .phy_bw6 = r5 326 .phy_regs.phy_bw6 = r5
327 327
328static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = { 328static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab[] = {
329 { .channel = 184, 329 { .channel = 184,
330 .freq = 4920, /* MHz */ 330 .freq = 4920, /* MHz */
331 .unk2 = 3280, 331 .unk2 = 3280,
@@ -1320,10 +1320,10 @@ static const struct b43_nphy_channeltab_entry b43_nphy_channeltab[] = {
1320 }, 1320 },
1321}; 1321};
1322 1322
1323const struct b43_nphy_channeltab_entry * 1323const struct b43_nphy_channeltab_entry_rev2 *
1324b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel) 1324b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel)
1325{ 1325{
1326 const struct b43_nphy_channeltab_entry *e; 1326 const struct b43_nphy_channeltab_entry_rev2 *e;
1327 unsigned int i; 1327 unsigned int i;
1328 1328
1329 for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab); i++) { 1329 for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab); i++) {
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 9c1c6ecd3672..8fc1da9f8fe5 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -4,9 +4,22 @@
4#include <linux/types.h> 4#include <linux/types.h>
5 5
6 6
7struct b43_nphy_channeltab_entry { 7struct b43_phy_n_sfo_cfg {
8 u16 phy_bw1a;
9 u16 phy_bw2;
10 u16 phy_bw3;
11 u16 phy_bw4;
12 u16 phy_bw5;
13 u16 phy_bw6;
14};
15
16struct b43_nphy_channeltab_entry_rev2 {
8 /* The channel number */ 17 /* The channel number */
9 u8 channel; 18 u8 channel;
19 /* The channel frequency in MHz */
20 u16 freq;
21 /* An unknown value */
22 u16 unk2;
10 /* Radio register values on channelswitch */ 23 /* Radio register values on channelswitch */
11 u8 radio_pll_ref; 24 u8 radio_pll_ref;
12 u8 radio_rf_pllmod0; 25 u8 radio_rf_pllmod0;
@@ -31,16 +44,18 @@ struct b43_nphy_channeltab_entry {
31 u8 radio_c2_tx_pgapadtn; 44 u8 radio_c2_tx_pgapadtn;
32 u8 radio_c2_tx_mxbgtrim; 45 u8 radio_c2_tx_mxbgtrim;
33 /* PHY register values on channelswitch */ 46 /* PHY register values on channelswitch */
34 u16 phy_bw1a; 47 struct b43_phy_n_sfo_cfg phy_regs;
35 u16 phy_bw2; 48};
36 u16 phy_bw3; 49
37 u16 phy_bw4; 50struct b43_nphy_channeltab_entry_rev3 {
38 u16 phy_bw5; 51 /* The channel number */
39 u16 phy_bw6; 52 u8 channel;
40 /* The channel frequency in MHz */ 53 /* The channel frequency in MHz */
41 u16 freq; 54 u16 freq;
42 /* An unknown value */ 55 /* Radio register values on channelswitch */
43 u16 unk2; 56 /* TODO */
57 /* PHY register values on channelswitch */
58 struct b43_phy_n_sfo_cfg phy_regs;
44}; 59};
45 60
46 61
@@ -77,8 +92,8 @@ void b2055_upload_inittab(struct b43_wldev *dev,
77 92
78/* Get the NPHY Channel Switch Table entry for a channel number. 93/* Get the NPHY Channel Switch Table entry for a channel number.
79 * Returns NULL on failure to find an entry. */ 94 * Returns NULL on failure to find an entry. */
80const struct b43_nphy_channeltab_entry * 95const struct b43_nphy_channeltab_entry_rev2 *
81b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel); 96b43_nphy_get_chantabent_rev2(struct b43_wldev *dev, u8 channel);
82 97
83 98
84/* The N-PHY tables. */ 99/* The N-PHY tables. */
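The PHYREGS rework in tables_nphy.c leans on C99 designated initializers reaching into a nested member (.phy_regs.phy_bw1a = r0), which is why the six SFO registers could move into the shared struct b43_phy_n_sfo_cfg without touching any of the table rows that invoke the macro. A standalone demonstration (field names and register values are placeholders, not real channel data):

    #include <stdio.h>

    struct sfo_cfg { unsigned short bw1a, bw2; };
    struct chantab_entry {
            unsigned char channel;
            struct sfo_cfg phy_regs;        /* the regs, now shared */
    };

    /* Designators may name nested members, so only the macro body
     * changes while every PHYREGS(...) call site stays as it was. */
    #define PHYREGS(r0, r1) .phy_regs.bw1a = (r0), .phy_regs.bw2 = (r1)

    static const struct chantab_entry tab[] = {
            { .channel = 184, PHYREGS(0x1234, 0x5678) },
    };

    int main(void)
    {
            printf("%d: %#x\n", tab[0].channel,
                   (unsigned)tab[0].phy_regs.bw1a);
            return 0;
    }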
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9b72c45a7748..2088ac029b35 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -2140,7 +2140,7 @@ static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
2140 DECLARE_SSID_BUF(ssid); 2140 DECLARE_SSID_BUF(ssid);
2141 2141
2142 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC, 2142 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
2143 "disassociated: '%s' %pM \n", 2143 "disassociated: '%s' %pM\n",
2144 print_ssid(ssid, priv->essid, priv->essid_len), 2144 print_ssid(ssid, priv->essid, priv->essid_len),
2145 priv->bssid); 2145 priv->bssid);
2146 2146
@@ -3285,7 +3285,7 @@ static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
3285 3285
3286 if (inta & IPW2100_INTA_PARITY_ERROR) { 3286 if (inta & IPW2100_INTA_PARITY_ERROR) {
3287 printk(KERN_ERR DRV_NAME 3287 printk(KERN_ERR DRV_NAME
3288 ": ***** PARITY ERROR INTERRUPT !!!! \n"); 3288 ": ***** PARITY ERROR INTERRUPT !!!!\n");
3289 priv->inta_other++; 3289 priv->inta_other++;
3290 write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR); 3290 write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR);
3291 } 3291 }
@@ -6102,7 +6102,7 @@ static const struct net_device_ops ipw2100_netdev_ops = {
6102 .ndo_validate_addr = eth_validate_addr, 6102 .ndo_validate_addr = eth_validate_addr,
6103}; 6103};
6104 6104
6105/* Look into using netdev destructor to shutdown ieee80211? */ 6105/* Look into using netdev destructor to shutdown libipw? */
6106 6106
6107static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, 6107static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6108 void __iomem * base_addr, 6108 void __iomem * base_addr,
@@ -6112,7 +6112,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
6112 struct ipw2100_priv *priv; 6112 struct ipw2100_priv *priv;
6113 struct net_device *dev; 6113 struct net_device *dev;
6114 6114
6115 dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0); 6115 dev = alloc_libipw(sizeof(struct ipw2100_priv), 0);
6116 if (!dev) 6116 if (!dev)
6117 return NULL; 6117 return NULL;
6118 priv = libipw_priv(dev); 6118 priv = libipw_priv(dev);
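The remaining ipw2100 hunks are mechanical: the library rename from ieee80211 to libipw at the alloc/free call sites, and dropping the stray blank before \n in log strings. The one ordering rule worth noting survives the rename and is spelled out in the remove path below: unregister the wiphy before freeing the net_device that embeds it. A sketch of the paired lifetime (the unregister helper is a stand-in; the two-argument alloc/free shapes match the hunks):

    struct net_device;                      /* opaque for the sketch */

    extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
    extern void free_libipw(struct net_device *dev, int monitor);
    extern void unregister_wiphy_of(struct net_device *dev);    /* stand-in */

    struct net_device *probe(int priv_size)
    {
            return alloc_libipw(priv_size, 0);  /* was alloc_ieee80211() */
    }

    void remove(struct net_device *dev)
    {
            unregister_wiphy_of(dev);       /* must precede the free */
            free_libipw(dev, 0);            /* was free_ieee80211()  */
    }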
@@ -6425,7 +6425,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6425 sysfs_remove_group(&pci_dev->dev.kobj, 6425 sysfs_remove_group(&pci_dev->dev.kobj,
6426 &ipw2100_attribute_group); 6426 &ipw2100_attribute_group);
6427 6427
6428 free_ieee80211(dev, 0); 6428 free_libipw(dev, 0);
6429 pci_set_drvdata(pci_dev, NULL); 6429 pci_set_drvdata(pci_dev, NULL);
6430 } 6430 }
6431 6431
@@ -6483,10 +6483,10 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
6483 if (dev->base_addr) 6483 if (dev->base_addr)
6484 iounmap((void __iomem *)dev->base_addr); 6484 iounmap((void __iomem *)dev->base_addr);
6485 6485
6486 /* wiphy_unregister needs to be here, before free_ieee80211 */ 6486 /* wiphy_unregister needs to be here, before free_libipw */
6487 wiphy_unregister(priv->ieee->wdev.wiphy); 6487 wiphy_unregister(priv->ieee->wdev.wiphy);
6488 kfree(priv->ieee->bg_band.channels); 6488 kfree(priv->ieee->bg_band.channels);
6489 free_ieee80211(dev, 0); 6489 free_libipw(dev, 0);
6490 } 6490 }
6491 6491
6492 pci_release_regions(pci_dev); 6492 pci_release_regions(pci_dev);
@@ -6753,7 +6753,7 @@ static int ipw2100_wx_set_freq(struct net_device *dev,
6753 err = -EOPNOTSUPP; 6753 err = -EOPNOTSUPP;
6754 goto done; 6754 goto done;
6755 } else { /* Set the channel */ 6755 } else { /* Set the channel */
6756 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); 6756 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
6757 err = ipw2100_set_channel(priv, fwrq->m, 0); 6757 err = ipw2100_set_channel(priv, fwrq->m, 0);
6758 } 6758 }
6759 6759
@@ -6782,7 +6782,7 @@ static int ipw2100_wx_get_freq(struct net_device *dev,
6782 else 6782 else
6783 wrqu->freq.m = 0; 6783 wrqu->freq.m = 0;
6784 6784
6785 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); 6785 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
6786 return 0; 6786 return 0;
6787 6787
6788} 6788}
@@ -6794,7 +6794,7 @@ static int ipw2100_wx_set_mode(struct net_device *dev,
6794 struct ipw2100_priv *priv = libipw_priv(dev); 6794 struct ipw2100_priv *priv = libipw_priv(dev);
6795 int err = 0; 6795 int err = 0;
6796 6796
6797 IPW_DEBUG_WX("SET Mode -> %d \n", wrqu->mode); 6797 IPW_DEBUG_WX("SET Mode -> %d\n", wrqu->mode);
6798 6798
6799 if (wrqu->mode == priv->ieee->iw_mode) 6799 if (wrqu->mode == priv->ieee->iw_mode)
6800 return 0; 6800 return 0;
@@ -7149,7 +7149,7 @@ static int ipw2100_wx_set_nick(struct net_device *dev,
7149 memset(priv->nick, 0, sizeof(priv->nick)); 7149 memset(priv->nick, 0, sizeof(priv->nick));
7150 memcpy(priv->nick, extra, wrqu->data.length); 7150 memcpy(priv->nick, extra, wrqu->data.length);
7151 7151
7152 IPW_DEBUG_WX("SET Nickname -> %s \n", priv->nick); 7152 IPW_DEBUG_WX("SET Nickname -> %s\n", priv->nick);
7153 7153
7154 return 0; 7154 return 0;
7155} 7155}
@@ -7168,7 +7168,7 @@ static int ipw2100_wx_get_nick(struct net_device *dev,
7168 memcpy(extra, priv->nick, wrqu->data.length); 7168 memcpy(extra, priv->nick, wrqu->data.length);
7169 wrqu->data.flags = 1; /* active */ 7169 wrqu->data.flags = 1; /* active */
7170 7170
7171 IPW_DEBUG_WX("GET Nickname -> %s \n", extra); 7171 IPW_DEBUG_WX("GET Nickname -> %s\n", extra);
7172 7172
7173 return 0; 7173 return 0;
7174} 7174}
@@ -7207,7 +7207,7 @@ static int ipw2100_wx_set_rate(struct net_device *dev,
7207 7207
7208 err = ipw2100_set_tx_rates(priv, rate, 0); 7208 err = ipw2100_set_tx_rates(priv, rate, 0);
7209 7209
7210 IPW_DEBUG_WX("SET Rate -> %04X \n", rate); 7210 IPW_DEBUG_WX("SET Rate -> %04X\n", rate);
7211 done: 7211 done:
7212 mutex_unlock(&priv->action_mutex); 7212 mutex_unlock(&priv->action_mutex);
7213 return err; 7213 return err;
@@ -7258,7 +7258,7 @@ static int ipw2100_wx_get_rate(struct net_device *dev,
7258 wrqu->bitrate.value = 0; 7258 wrqu->bitrate.value = 0;
7259 } 7259 }
7260 7260
7261 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); 7261 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
7262 7262
7263 done: 7263 done:
7264 mutex_unlock(&priv->action_mutex); 7264 mutex_unlock(&priv->action_mutex);
@@ -7294,7 +7294,7 @@ static int ipw2100_wx_set_rts(struct net_device *dev,
7294 7294
7295 err = ipw2100_set_rts_threshold(priv, value); 7295 err = ipw2100_set_rts_threshold(priv, value);
7296 7296
7297 IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X \n", value); 7297 IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X\n", value);
7298 done: 7298 done:
7299 mutex_unlock(&priv->action_mutex); 7299 mutex_unlock(&priv->action_mutex);
7300 return err; 7300 return err;
@@ -7316,7 +7316,7 @@ static int ipw2100_wx_get_rts(struct net_device *dev,
7316 /* If RTS is set to the default value, then it is disabled */ 7316 /* If RTS is set to the default value, then it is disabled */
7317 wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0; 7317 wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 1 : 0;
7318 7318
7319 IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X \n", wrqu->rts.value); 7319 IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X\n", wrqu->rts.value);
7320 7320
7321 return 0; 7321 return 0;
7322} 7322}
@@ -7355,7 +7355,7 @@ static int ipw2100_wx_set_txpow(struct net_device *dev,
7355 7355
7356 err = ipw2100_set_tx_power(priv, value); 7356 err = ipw2100_set_tx_power(priv, value);
7357 7357
7358 IPW_DEBUG_WX("SET TX Power -> %d \n", value); 7358 IPW_DEBUG_WX("SET TX Power -> %d\n", value);
7359 7359
7360 done: 7360 done:
7361 mutex_unlock(&priv->action_mutex); 7361 mutex_unlock(&priv->action_mutex);
@@ -7384,7 +7384,7 @@ static int ipw2100_wx_get_txpow(struct net_device *dev,
7384 7384
7385 wrqu->txpower.flags = IW_TXPOW_DBM; 7385 wrqu->txpower.flags = IW_TXPOW_DBM;
7386 7386
7387 IPW_DEBUG_WX("GET TX Power -> %d \n", wrqu->txpower.value); 7387 IPW_DEBUG_WX("GET TX Power -> %d\n", wrqu->txpower.value);
7388 7388
7389 return 0; 7389 return 0;
7390} 7390}
@@ -7414,7 +7414,7 @@ static int ipw2100_wx_set_frag(struct net_device *dev,
7414 priv->frag_threshold = priv->ieee->fts; 7414 priv->frag_threshold = priv->ieee->fts;
7415 } 7415 }
7416 7416
7417 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", priv->ieee->fts); 7417 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", priv->ieee->fts);
7418 7418
7419 return 0; 7419 return 0;
7420} 7420}
@@ -7432,7 +7432,7 @@ static int ipw2100_wx_get_frag(struct net_device *dev,
7432 wrqu->frag.fixed = 0; /* no auto select */ 7432 wrqu->frag.fixed = 0; /* no auto select */
7433 wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0; 7433 wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0;
7434 7434
7435 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); 7435 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
7436 7436
7437 return 0; 7437 return 0;
7438} 7438}
@@ -7458,14 +7458,14 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
7458 7458
7459 if (wrqu->retry.flags & IW_RETRY_SHORT) { 7459 if (wrqu->retry.flags & IW_RETRY_SHORT) {
7460 err = ipw2100_set_short_retry(priv, wrqu->retry.value); 7460 err = ipw2100_set_short_retry(priv, wrqu->retry.value);
7461 IPW_DEBUG_WX("SET Short Retry Limit -> %d \n", 7461 IPW_DEBUG_WX("SET Short Retry Limit -> %d\n",
7462 wrqu->retry.value); 7462 wrqu->retry.value);
7463 goto done; 7463 goto done;
7464 } 7464 }
7465 7465
7466 if (wrqu->retry.flags & IW_RETRY_LONG) { 7466 if (wrqu->retry.flags & IW_RETRY_LONG) {
7467 err = ipw2100_set_long_retry(priv, wrqu->retry.value); 7467 err = ipw2100_set_long_retry(priv, wrqu->retry.value);
7468 IPW_DEBUG_WX("SET Long Retry Limit -> %d \n", 7468 IPW_DEBUG_WX("SET Long Retry Limit -> %d\n",
7469 wrqu->retry.value); 7469 wrqu->retry.value);
7470 goto done; 7470 goto done;
7471 } 7471 }
@@ -7474,7 +7474,7 @@ static int ipw2100_wx_set_retry(struct net_device *dev,
7474 if (!err) 7474 if (!err)
7475 err = ipw2100_set_long_retry(priv, wrqu->retry.value); 7475 err = ipw2100_set_long_retry(priv, wrqu->retry.value);
7476 7476
7477 IPW_DEBUG_WX("SET Both Retry Limits -> %d \n", wrqu->retry.value); 7477 IPW_DEBUG_WX("SET Both Retry Limits -> %d\n", wrqu->retry.value);
7478 7478
7479 done: 7479 done:
7480 mutex_unlock(&priv->action_mutex); 7480 mutex_unlock(&priv->action_mutex);
@@ -7508,7 +7508,7 @@ static int ipw2100_wx_get_retry(struct net_device *dev,
7508 wrqu->retry.value = priv->short_retry_limit; 7508 wrqu->retry.value = priv->short_retry_limit;
7509 } 7509 }
7510 7510
7511 IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value); 7511 IPW_DEBUG_WX("GET Retry -> %d\n", wrqu->retry.value);
7512 7512
7513 return 0; 7513 return 0;
7514} 7514}
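Every ipw2100 hunk above makes the same one-character cleanup: a stray space sat between the value and the newline in a printk-style format string, which left a trailing blank on each debug line in the kernel log. The before/after shape, taken from the GET Retry handler:

	IPW_DEBUG_WX("GET Retry -> %d \n", wrqu->retry.value);	/* old: trailing blank */
	IPW_DEBUG_WX("GET Retry -> %d\n", wrqu->retry.value);	/* new */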
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 8d72e3d19586..82de71a3aea7 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -459,7 +459,7 @@ static u8 _ipw_read_reg8(struct ipw_priv *priv, u32 reg)
459{ 459{
460 u32 word; 460 u32 word;
461 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK); 461 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg & IPW_INDIRECT_ADDR_MASK);
462 IPW_DEBUG_IO(" reg = 0x%8X : \n", reg); 462 IPW_DEBUG_IO(" reg = 0x%8X :\n", reg);
463 word = _ipw_read32(priv, IPW_INDIRECT_DATA); 463 word = _ipw_read32(priv, IPW_INDIRECT_DATA);
464 return (word >> ((reg & 0x3) * 8)) & 0xff; 464 return (word >> ((reg & 0x3) * 8)) & 0xff;
465} 465}
@@ -473,7 +473,7 @@ static u32 _ipw_read_reg32(struct ipw_priv *priv, u32 reg)
473 473
474 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg); 474 _ipw_write32(priv, IPW_INDIRECT_ADDR, reg);
475 value = _ipw_read32(priv, IPW_INDIRECT_DATA); 475 value = _ipw_read32(priv, IPW_INDIRECT_DATA);
476 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x \n", reg, value); 476 IPW_DEBUG_IO(" reg = 0x%4X : value = 0x%4x\n", reg, value);
477 return value; 477 return value;
478} 478}
479 479
@@ -2349,16 +2349,25 @@ static void ipw_bg_adapter_restart(struct work_struct *work)
2349 mutex_unlock(&priv->mutex); 2349 mutex_unlock(&priv->mutex);
2350} 2350}
2351 2351
2352#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ) 2352static void ipw_abort_scan(struct ipw_priv *priv);
2353
2354#define IPW_SCAN_CHECK_WATCHDOG (5 * HZ)
2353 2355
2354static void ipw_scan_check(void *data) 2356static void ipw_scan_check(void *data)
2355{ 2357{
2356 struct ipw_priv *priv = data; 2358 struct ipw_priv *priv = data;
2357 if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { 2359
2360 if (priv->status & STATUS_SCAN_ABORTING) {
2358 IPW_DEBUG_SCAN("Scan completion watchdog resetting " 2361 IPW_DEBUG_SCAN("Scan completion watchdog resetting "
2359 "adapter after (%dms).\n", 2362 "adapter after (%dms).\n",
2360 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG)); 2363 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2361 queue_work(priv->workqueue, &priv->adapter_restart); 2364 queue_work(priv->workqueue, &priv->adapter_restart);
2365 } else if (priv->status & STATUS_SCANNING) {
2366 IPW_DEBUG_SCAN("Scan completion watchdog aborting scan "
2367 "after (%dms).\n",
2368 jiffies_to_msecs(IPW_SCAN_CHECK_WATCHDOG));
2369 ipw_abort_scan(priv);
2370 queue_delayed_work(priv->workqueue, &priv->scan_check, HZ);
2362 } 2371 }
2363} 2372}
2364 2373
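The ipw_scan_check() rework above replaces a single heavy-handed recovery with a two-stage one. Previously either STATUS_SCANNING or STATUS_SCAN_ABORTING triggered a full adapter restart; now a merely stuck scan is first aborted and the watchdog re-armed a second later, and only an abort that itself hangs escalates to the restart. The new forward declaration exists only because ipw_abort_scan() is defined further down the file. The control flow in isolation, using the symbols from the hunk:

	static void scan_check_sketch(struct ipw_priv *priv)
	{
		if (priv->status & STATUS_SCAN_ABORTING) {
			/* an earlier abort never completed: last resort */
			queue_work(priv->workqueue, &priv->adapter_restart);
		} else if (priv->status & STATUS_SCANNING) {
			/* cheap first attempt: abort, re-check in 1s */
			ipw_abort_scan(priv);
			queue_delayed_work(priv->workqueue,
					   &priv->scan_check, HZ);
		}
	}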
@@ -2739,7 +2748,7 @@ static inline void ipw_fw_dma_reset_command_blocks(struct ipw_priv *priv)
2739static int ipw_fw_dma_enable(struct ipw_priv *priv) 2748static int ipw_fw_dma_enable(struct ipw_priv *priv)
2740{ /* start dma engine but no transfers yet */ 2749{ /* start dma engine but no transfers yet */
2741 2750
2742 IPW_DEBUG_FW(">> : \n"); 2751 IPW_DEBUG_FW(">> :\n");
2743 2752
2744 /* Start the dma */ 2753 /* Start the dma */
2745 ipw_fw_dma_reset_command_blocks(priv); 2754 ipw_fw_dma_reset_command_blocks(priv);
@@ -2747,7 +2756,7 @@ static int ipw_fw_dma_enable(struct ipw_priv *priv)
2747 /* Write CB base address */ 2756 /* Write CB base address */
2748 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL); 2757 ipw_write_reg32(priv, IPW_DMA_I_CB_BASE, IPW_SHARED_SRAM_DMA_CONTROL);
2749 2758
2750 IPW_DEBUG_FW("<< : \n"); 2759 IPW_DEBUG_FW("<< :\n");
2751 return 0; 2760 return 0;
2752} 2761}
2753 2762
@@ -2762,7 +2771,7 @@ static void ipw_fw_dma_abort(struct ipw_priv *priv)
2762 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control); 2771 ipw_write_reg32(priv, IPW_DMA_I_DMA_CONTROL, control);
2763 priv->sram_desc.last_cb_index = 0; 2772 priv->sram_desc.last_cb_index = 0;
2764 2773
2765 IPW_DEBUG_FW("<< \n"); 2774 IPW_DEBUG_FW("<<\n");
2766} 2775}
2767 2776
2768static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index, 2777static int ipw_fw_dma_write_command_block(struct ipw_priv *priv, int index,
@@ -2813,29 +2822,29 @@ static void ipw_fw_dma_dump_command_block(struct ipw_priv *priv)
2813 2822
2814 IPW_DEBUG_FW(">> :\n"); 2823 IPW_DEBUG_FW(">> :\n");
2815 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB); 2824 address = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
2816 IPW_DEBUG_FW_INFO("Current CB is 0x%x \n", address); 2825 IPW_DEBUG_FW_INFO("Current CB is 0x%x\n", address);
2817 2826
2818 /* Read the DMA Controller register */ 2827 /* Read the DMA Controller register */
2819 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL); 2828 register_value = ipw_read_reg32(priv, IPW_DMA_I_DMA_CONTROL);
2820 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x \n", register_value); 2829 IPW_DEBUG_FW_INFO("IPW_DMA_I_DMA_CONTROL is 0x%x\n", register_value);
2821 2830
2822 /* Print the CB values */ 2831 /* Print the CB values */
2823 cb_fields_address = address; 2832 cb_fields_address = address;
2824 register_value = ipw_read_reg32(priv, cb_fields_address); 2833 register_value = ipw_read_reg32(priv, cb_fields_address);
2825 IPW_DEBUG_FW_INFO("Current CB ControlField is 0x%x \n", register_value); 2834 IPW_DEBUG_FW_INFO("Current CB Control Field is 0x%x\n", register_value);
2826 2835
2827 cb_fields_address += sizeof(u32); 2836 cb_fields_address += sizeof(u32);
2828 register_value = ipw_read_reg32(priv, cb_fields_address); 2837 register_value = ipw_read_reg32(priv, cb_fields_address);
2829 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x \n", register_value); 2838 IPW_DEBUG_FW_INFO("Current CB Source Field is 0x%x\n", register_value);
2830 2839
2831 cb_fields_address += sizeof(u32); 2840 cb_fields_address += sizeof(u32);
2832 register_value = ipw_read_reg32(priv, cb_fields_address); 2841 register_value = ipw_read_reg32(priv, cb_fields_address);
2833 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x \n", 2842 IPW_DEBUG_FW_INFO("Current CB Destination Field is 0x%x\n",
2834 register_value); 2843 register_value);
2835 2844
2836 cb_fields_address += sizeof(u32); 2845 cb_fields_address += sizeof(u32);
2837 register_value = ipw_read_reg32(priv, cb_fields_address); 2846 register_value = ipw_read_reg32(priv, cb_fields_address);
2838 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x \n", register_value); 2847 IPW_DEBUG_FW_INFO("Current CB Status Field is 0x%x\n", register_value);
2839 2848
2840 IPW_DEBUG_FW(">> :\n"); 2849 IPW_DEBUG_FW(">> :\n");
2841} 2850}
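The four back-to-back reads above walk consecutive u32 fields of a single command block (control, source, destination, status). A hypothetical table-driven equivalent, not what the driver actually does, just to make the register layout explicit:

	static void dump_cb_sketch(struct ipw_priv *priv)
	{
		static const char * const cb_field[] = {
			"Control", "Source", "Destination", "Status",
		};
		u32 cb = ipw_read_reg32(priv, IPW_DMA_I_CURRENT_CB);
		int i;

		for (i = 0; i < 4; i++)
			IPW_DEBUG_FW_INFO("Current CB %s Field is 0x%x\n",
					  cb_field[i],
					  ipw_read_reg32(priv, cb + i * sizeof(u32)));
	}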
@@ -2851,7 +2860,7 @@ static int ipw_fw_dma_command_block_index(struct ipw_priv *priv)
2851 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) / 2860 current_cb_index = (current_cb_address - IPW_SHARED_SRAM_DMA_CONTROL) /
2852 sizeof(struct command_block); 2861 sizeof(struct command_block);
2853 2862
2854 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X \n", 2863 IPW_DEBUG_FW_INFO("Current CB index 0x%x address = 0x%X\n",
2855 current_cb_index, current_cb_address); 2864 current_cb_index, current_cb_address);
2856 2865
2857 IPW_DEBUG_FW(">> :\n"); 2866 IPW_DEBUG_FW(">> :\n");
@@ -2910,7 +2919,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2910 int ret, i; 2919 int ret, i;
2911 u32 size; 2920 u32 size;
2912 2921
2913 IPW_DEBUG_FW(">> \n"); 2922 IPW_DEBUG_FW(">>\n");
2914 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n", 2923 IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
2915 nr, dest_address, len); 2924 nr, dest_address, len);
2916 2925
@@ -2927,7 +2936,7 @@ static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
2927 IPW_DEBUG_FW_INFO(": Added new cb\n"); 2936 IPW_DEBUG_FW_INFO(": Added new cb\n");
2928 } 2937 }
2929 2938
2930 IPW_DEBUG_FW("<< \n"); 2939 IPW_DEBUG_FW("<<\n");
2931 return 0; 2940 return 0;
2932} 2941}
2933 2942
@@ -2936,7 +2945,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
2936 u32 current_index = 0, previous_index; 2945 u32 current_index = 0, previous_index;
2937 u32 watchdog = 0; 2946 u32 watchdog = 0;
2938 2947
2939 IPW_DEBUG_FW(">> : \n"); 2948 IPW_DEBUG_FW(">> :\n");
2940 2949
2941 current_index = ipw_fw_dma_command_block_index(priv); 2950 current_index = ipw_fw_dma_command_block_index(priv);
2942 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n", 2951 IPW_DEBUG_FW_INFO("sram_desc.last_cb_index:0x%08X\n",
@@ -2965,7 +2974,7 @@ static int ipw_fw_dma_wait(struct ipw_priv *priv)
2965 ipw_set_bit(priv, IPW_RESET_REG, 2974 ipw_set_bit(priv, IPW_RESET_REG,
2966 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER); 2975 IPW_RESET_REG_MASTER_DISABLED | IPW_RESET_REG_STOP_MASTER);
2967 2976
2968 IPW_DEBUG_FW("<< dmaWaitSync \n"); 2977 IPW_DEBUG_FW("<< dmaWaitSync\n");
2969 return 0; 2978 return 0;
2970} 2979}
2971 2980
@@ -3026,7 +3035,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
3026{ 3035{
3027 int rc; 3036 int rc;
3028 3037
3029 IPW_DEBUG_TRACE(">> \n"); 3038 IPW_DEBUG_TRACE(">>\n");
3030 /* stop master. typical delay - 0 */ 3039 /* stop master. typical delay - 0 */
3031 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER); 3040 ipw_set_bit(priv, IPW_RESET_REG, IPW_RESET_REG_STOP_MASTER);
3032 3041
@@ -3045,7 +3054,7 @@ static int ipw_stop_master(struct ipw_priv *priv)
3045 3054
3046static void ipw_arc_release(struct ipw_priv *priv) 3055static void ipw_arc_release(struct ipw_priv *priv)
3047{ 3056{
3048 IPW_DEBUG_TRACE(">> \n"); 3057 IPW_DEBUG_TRACE(">>\n");
3049 mdelay(5); 3058 mdelay(5);
3050 3059
3051 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET); 3060 ipw_clear_bit(priv, IPW_RESET_REG, CBD_RESET_REG_PRINCETON_RESET);
@@ -3067,7 +3076,7 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
3067 3076
3068 image = (__le16 *) data; 3077 image = (__le16 *) data;
3069 3078
3070 IPW_DEBUG_TRACE(">> \n"); 3079 IPW_DEBUG_TRACE(">>\n");
3071 3080
3072 rc = ipw_stop_master(priv); 3081 rc = ipw_stop_master(priv);
3073 3082
@@ -3181,7 +3190,7 @@ static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
3181 void **virts; 3190 void **virts;
3182 dma_addr_t *phys; 3191 dma_addr_t *phys;
3183 3192
3184 IPW_DEBUG_TRACE("<< : \n"); 3193 IPW_DEBUG_TRACE("<< :\n");
3185 3194
3186 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL, 3195 virts = kmalloc(sizeof(void *) * CB_NUMBER_OF_ELEMENTS_SMALL,
3187 GFP_KERNEL); 3196 GFP_KERNEL);
@@ -4482,7 +4491,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4482 case CMAS_ASSOCIATED:{ 4491 case CMAS_ASSOCIATED:{
4483 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4492 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4484 IPW_DL_ASSOC, 4493 IPW_DL_ASSOC,
4485 "associated: '%s' %pM \n", 4494 "associated: '%s' %pM\n",
4486 print_ssid(ssid, priv->essid, 4495 print_ssid(ssid, priv->essid,
4487 priv->essid_len), 4496 priv->essid_len),
4488 priv->bssid); 4497 priv->bssid);
@@ -4563,7 +4572,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4563 IPW_DL_ASSOC, 4572 IPW_DL_ASSOC,
4564 "deauthenticated: '%s' " 4573 "deauthenticated: '%s' "
4565 "%pM" 4574 "%pM"
4566 ": (0x%04X) - %s \n", 4575 ": (0x%04X) - %s\n",
4567 print_ssid(ssid, 4576 print_ssid(ssid,
4568 priv-> 4577 priv->
4569 essid, 4578 essid,
@@ -4614,7 +4623,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4614 4623
4615 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | 4624 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE |
4616 IPW_DL_ASSOC, 4625 IPW_DL_ASSOC,
4617 "disassociated: '%s' %pM \n", 4626 "disassociated: '%s' %pM\n",
4618 print_ssid(ssid, priv->essid, 4627 print_ssid(ssid, priv->essid,
4619 priv->essid_len), 4628 priv->essid_len),
4620 priv->bssid); 4629 priv->bssid);
@@ -4652,7 +4661,7 @@ static void ipw_rx_notification(struct ipw_priv *priv,
4652 switch (auth->state) { 4661 switch (auth->state) {
4653 case CMAS_AUTHENTICATED: 4662 case CMAS_AUTHENTICATED:
4654 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE, 4663 IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE,
4655 "authenticated: '%s' %pM \n", 4664 "authenticated: '%s' %pM\n",
4656 print_ssid(ssid, priv->essid, 4665 print_ssid(ssid, priv->essid,
4657 priv->essid_len), 4666 priv->essid_len),
4658 priv->bssid); 4667 priv->bssid);
@@ -6925,7 +6934,7 @@ static u8 ipw_qos_current_mode(struct ipw_priv * priv)
6925 } else { 6934 } else {
6926 mode = priv->ieee->mode; 6935 mode = priv->ieee->mode;
6927 } 6936 }
6928 IPW_DEBUG_QOS("QoS network/card mode %d \n", mode); 6937 IPW_DEBUG_QOS("QoS network/card mode %d\n", mode);
6929 return mode; 6938 return mode;
6930} 6939}
6931 6940
@@ -6965,7 +6974,7 @@ static int ipw_qos_handle_probe_response(struct ipw_priv *priv,
6965 &def_parameters_OFDM, size); 6974 &def_parameters_OFDM, size);
6966 6975
6967 if ((network->qos_data.active == 1) && (active_network == 1)) { 6976 if ((network->qos_data.active == 1) && (active_network == 1)) {
6968 IPW_DEBUG_QOS("QoS was disabled call qos_activate \n"); 6977 IPW_DEBUG_QOS("QoS was disabled call qos_activate\n");
6969 schedule_work(&priv->qos_activate); 6978 schedule_work(&priv->qos_activate);
6970 } 6979 }
6971 6980
@@ -7542,7 +7551,7 @@ static int ipw_associate_network(struct ipw_priv *priv,
7542 return err; 7551 return err;
7543 } 7552 }
7544 7553
7545 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM \n", 7554 IPW_DEBUG(IPW_DL_STATE, "associating: '%s' %pM\n",
7546 print_ssid(ssid, priv->essid, priv->essid_len), 7555 print_ssid(ssid, priv->essid, priv->essid_len),
7547 priv->bssid); 7556 priv->bssid);
7548 7557
@@ -8793,7 +8802,7 @@ static int ipw_wx_set_freq(struct net_device *dev,
8793 } 8802 }
8794 } 8803 }
8795 8804
8796 IPW_DEBUG_WX("SET Freq/Channel -> %d \n", fwrq->m); 8805 IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m);
8797 mutex_lock(&priv->mutex); 8806 mutex_lock(&priv->mutex);
8798 ret = ipw_set_channel(priv, channel); 8807 ret = ipw_set_channel(priv, channel);
8799 mutex_unlock(&priv->mutex); 8808 mutex_unlock(&priv->mutex);
@@ -8835,7 +8844,7 @@ static int ipw_wx_get_freq(struct net_device *dev,
8835 wrqu->freq.m = 0; 8844 wrqu->freq.m = 0;
8836 8845
8837 mutex_unlock(&priv->mutex); 8846 mutex_unlock(&priv->mutex);
8838 IPW_DEBUG_WX("GET Freq/Channel -> %d \n", priv->channel); 8847 IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel);
8839 return 0; 8848 return 0;
8840} 8849}
8841 8850
@@ -9230,7 +9239,7 @@ static int ipw_wx_get_sens(struct net_device *dev,
9230 wrqu->sens.value = priv->roaming_threshold; 9239 wrqu->sens.value = priv->roaming_threshold;
9231 mutex_unlock(&priv->mutex); 9240 mutex_unlock(&priv->mutex);
9232 9241
9233 IPW_DEBUG_WX("GET roaming threshold -> %s %d \n", 9242 IPW_DEBUG_WX("GET roaming threshold -> %s %d\n",
9234 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); 9243 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9235 9244
9236 return 0; 9245 return 0;
@@ -9358,7 +9367,7 @@ static int ipw_wx_get_rate(struct net_device *dev,
9358 wrqu->bitrate.value = priv->last_rate; 9367 wrqu->bitrate.value = priv->last_rate;
9359 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0; 9368 wrqu->bitrate.fixed = (priv->config & CFG_FIXED_RATE) ? 1 : 0;
9360 mutex_unlock(&priv->mutex); 9369 mutex_unlock(&priv->mutex);
9361 IPW_DEBUG_WX("GET Rate -> %d \n", wrqu->bitrate.value); 9370 IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value);
9362 return 0; 9371 return 0;
9363} 9372}
9364 9373
@@ -9381,7 +9390,7 @@ static int ipw_wx_set_rts(struct net_device *dev,
9381 9390
9382 ipw_send_rts_threshold(priv, priv->rts_threshold); 9391 ipw_send_rts_threshold(priv, priv->rts_threshold);
9383 mutex_unlock(&priv->mutex); 9392 mutex_unlock(&priv->mutex);
9384 IPW_DEBUG_WX("SET RTS Threshold -> %d \n", priv->rts_threshold); 9393 IPW_DEBUG_WX("SET RTS Threshold -> %d\n", priv->rts_threshold);
9385 return 0; 9394 return 0;
9386} 9395}
9387 9396
@@ -9395,7 +9404,7 @@ static int ipw_wx_get_rts(struct net_device *dev,
9395 wrqu->rts.fixed = 0; /* no auto select */ 9404 wrqu->rts.fixed = 0; /* no auto select */
9396 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); 9405 wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD);
9397 mutex_unlock(&priv->mutex); 9406 mutex_unlock(&priv->mutex);
9398 IPW_DEBUG_WX("GET RTS Threshold -> %d \n", wrqu->rts.value); 9407 IPW_DEBUG_WX("GET RTS Threshold -> %d\n", wrqu->rts.value);
9399 return 0; 9408 return 0;
9400} 9409}
9401 9410
@@ -9445,7 +9454,7 @@ static int ipw_wx_get_txpow(struct net_device *dev,
9445 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; 9454 wrqu->power.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0;
9446 mutex_unlock(&priv->mutex); 9455 mutex_unlock(&priv->mutex);
9447 9456
9448 IPW_DEBUG_WX("GET TX Power -> %s %d \n", 9457 IPW_DEBUG_WX("GET TX Power -> %s %d\n",
9449 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value); 9458 wrqu->power.disabled ? "OFF" : "ON", wrqu->power.value);
9450 9459
9451 return 0; 9460 return 0;
@@ -9471,7 +9480,7 @@ static int ipw_wx_set_frag(struct net_device *dev,
9471 9480
9472 ipw_send_frag_threshold(priv, wrqu->frag.value); 9481 ipw_send_frag_threshold(priv, wrqu->frag.value);
9473 mutex_unlock(&priv->mutex); 9482 mutex_unlock(&priv->mutex);
9474 IPW_DEBUG_WX("SET Frag Threshold -> %d \n", wrqu->frag.value); 9483 IPW_DEBUG_WX("SET Frag Threshold -> %d\n", wrqu->frag.value);
9475 return 0; 9484 return 0;
9476} 9485}
9477 9486
@@ -9485,7 +9494,7 @@ static int ipw_wx_get_frag(struct net_device *dev,
9485 wrqu->frag.fixed = 0; /* no auto select */ 9494 wrqu->frag.fixed = 0; /* no auto select */
9486 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS); 9495 wrqu->frag.disabled = (wrqu->frag.value == DEFAULT_FTS);
9487 mutex_unlock(&priv->mutex); 9496 mutex_unlock(&priv->mutex);
9488 IPW_DEBUG_WX("GET Frag Threshold -> %d \n", wrqu->frag.value); 9497 IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value);
9489 9498
9490 return 0; 9499 return 0;
9491} 9500}
@@ -9549,7 +9558,7 @@ static int ipw_wx_get_retry(struct net_device *dev,
9549 } 9558 }
9550 mutex_unlock(&priv->mutex); 9559 mutex_unlock(&priv->mutex);
9551 9560
9552 IPW_DEBUG_WX("GET retry -> %d \n", wrqu->retry.value); 9561 IPW_DEBUG_WX("GET retry -> %d\n", wrqu->retry.value);
9553 9562
9554 return 0; 9563 return 0;
9555} 9564}
@@ -9996,49 +10005,48 @@ static int ipw_wx_sw_reset(struct net_device *dev,
9996} 10005}
9997 10006
9998/* Rebase the WE IOCTLs to zero for the handler array */ 10007/* Rebase the WE IOCTLs to zero for the handler array */
9999#define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
10000static iw_handler ipw_wx_handlers[] = { 10008static iw_handler ipw_wx_handlers[] = {
10001 IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname, 10009 IW_HANDLER(SIOCGIWNAME, (iw_handler)cfg80211_wext_giwname),
10002 IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq, 10010 IW_HANDLER(SIOCSIWFREQ, ipw_wx_set_freq),
10003 IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq, 10011 IW_HANDLER(SIOCGIWFREQ, ipw_wx_get_freq),
10004 IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode, 10012 IW_HANDLER(SIOCSIWMODE, ipw_wx_set_mode),
10005 IW_IOCTL(SIOCGIWMODE) = ipw_wx_get_mode, 10013 IW_HANDLER(SIOCGIWMODE, ipw_wx_get_mode),
10006 IW_IOCTL(SIOCSIWSENS) = ipw_wx_set_sens, 10014 IW_HANDLER(SIOCSIWSENS, ipw_wx_set_sens),
10007 IW_IOCTL(SIOCGIWSENS) = ipw_wx_get_sens, 10015 IW_HANDLER(SIOCGIWSENS, ipw_wx_get_sens),
10008 IW_IOCTL(SIOCGIWRANGE) = ipw_wx_get_range, 10016 IW_HANDLER(SIOCGIWRANGE, ipw_wx_get_range),
10009 IW_IOCTL(SIOCSIWAP) = ipw_wx_set_wap, 10017 IW_HANDLER(SIOCSIWAP, ipw_wx_set_wap),
10010 IW_IOCTL(SIOCGIWAP) = ipw_wx_get_wap, 10018 IW_HANDLER(SIOCGIWAP, ipw_wx_get_wap),
10011 IW_IOCTL(SIOCSIWSCAN) = ipw_wx_set_scan, 10019 IW_HANDLER(SIOCSIWSCAN, ipw_wx_set_scan),
10012 IW_IOCTL(SIOCGIWSCAN) = ipw_wx_get_scan, 10020 IW_HANDLER(SIOCGIWSCAN, ipw_wx_get_scan),
10013 IW_IOCTL(SIOCSIWESSID) = ipw_wx_set_essid, 10021 IW_HANDLER(SIOCSIWESSID, ipw_wx_set_essid),
10014 IW_IOCTL(SIOCGIWESSID) = ipw_wx_get_essid, 10022 IW_HANDLER(SIOCGIWESSID, ipw_wx_get_essid),
10015 IW_IOCTL(SIOCSIWNICKN) = ipw_wx_set_nick, 10023 IW_HANDLER(SIOCSIWNICKN, ipw_wx_set_nick),
10016 IW_IOCTL(SIOCGIWNICKN) = ipw_wx_get_nick, 10024 IW_HANDLER(SIOCGIWNICKN, ipw_wx_get_nick),
10017 IW_IOCTL(SIOCSIWRATE) = ipw_wx_set_rate, 10025 IW_HANDLER(SIOCSIWRATE, ipw_wx_set_rate),
10018 IW_IOCTL(SIOCGIWRATE) = ipw_wx_get_rate, 10026 IW_HANDLER(SIOCGIWRATE, ipw_wx_get_rate),
10019 IW_IOCTL(SIOCSIWRTS) = ipw_wx_set_rts, 10027 IW_HANDLER(SIOCSIWRTS, ipw_wx_set_rts),
10020 IW_IOCTL(SIOCGIWRTS) = ipw_wx_get_rts, 10028 IW_HANDLER(SIOCGIWRTS, ipw_wx_get_rts),
10021 IW_IOCTL(SIOCSIWFRAG) = ipw_wx_set_frag, 10029 IW_HANDLER(SIOCSIWFRAG, ipw_wx_set_frag),
10022 IW_IOCTL(SIOCGIWFRAG) = ipw_wx_get_frag, 10030 IW_HANDLER(SIOCGIWFRAG, ipw_wx_get_frag),
10023 IW_IOCTL(SIOCSIWTXPOW) = ipw_wx_set_txpow, 10031 IW_HANDLER(SIOCSIWTXPOW, ipw_wx_set_txpow),
10024 IW_IOCTL(SIOCGIWTXPOW) = ipw_wx_get_txpow, 10032 IW_HANDLER(SIOCGIWTXPOW, ipw_wx_get_txpow),
10025 IW_IOCTL(SIOCSIWRETRY) = ipw_wx_set_retry, 10033 IW_HANDLER(SIOCSIWRETRY, ipw_wx_set_retry),
10026 IW_IOCTL(SIOCGIWRETRY) = ipw_wx_get_retry, 10034 IW_HANDLER(SIOCGIWRETRY, ipw_wx_get_retry),
10027 IW_IOCTL(SIOCSIWENCODE) = ipw_wx_set_encode, 10035 IW_HANDLER(SIOCSIWENCODE, ipw_wx_set_encode),
10028 IW_IOCTL(SIOCGIWENCODE) = ipw_wx_get_encode, 10036 IW_HANDLER(SIOCGIWENCODE, ipw_wx_get_encode),
10029 IW_IOCTL(SIOCSIWPOWER) = ipw_wx_set_power, 10037 IW_HANDLER(SIOCSIWPOWER, ipw_wx_set_power),
10030 IW_IOCTL(SIOCGIWPOWER) = ipw_wx_get_power, 10038 IW_HANDLER(SIOCGIWPOWER, ipw_wx_get_power),
10031 IW_IOCTL(SIOCSIWSPY) = iw_handler_set_spy, 10039 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
10032 IW_IOCTL(SIOCGIWSPY) = iw_handler_get_spy, 10040 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
10033 IW_IOCTL(SIOCSIWTHRSPY) = iw_handler_set_thrspy, 10041 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
10034 IW_IOCTL(SIOCGIWTHRSPY) = iw_handler_get_thrspy, 10042 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
10035 IW_IOCTL(SIOCSIWGENIE) = ipw_wx_set_genie, 10043 IW_HANDLER(SIOCSIWGENIE, ipw_wx_set_genie),
10036 IW_IOCTL(SIOCGIWGENIE) = ipw_wx_get_genie, 10044 IW_HANDLER(SIOCGIWGENIE, ipw_wx_get_genie),
10037 IW_IOCTL(SIOCSIWMLME) = ipw_wx_set_mlme, 10045 IW_HANDLER(SIOCSIWMLME, ipw_wx_set_mlme),
10038 IW_IOCTL(SIOCSIWAUTH) = ipw_wx_set_auth, 10046 IW_HANDLER(SIOCSIWAUTH, ipw_wx_set_auth),
10039 IW_IOCTL(SIOCGIWAUTH) = ipw_wx_get_auth, 10047 IW_HANDLER(SIOCGIWAUTH, ipw_wx_get_auth),
10040 IW_IOCTL(SIOCSIWENCODEEXT) = ipw_wx_set_encodeext, 10048 IW_HANDLER(SIOCSIWENCODEEXT, ipw_wx_set_encodeext),
10041 IW_IOCTL(SIOCGIWENCODEEXT) = ipw_wx_get_encodeext, 10049 IW_HANDLER(SIOCGIWENCODEEXT, ipw_wx_get_encodeext),
10042}; 10050};
10043 10051
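The table rewrite above drops the driver-local IW_IOCTL() macro in favour of the wireless-extensions core's IW_HANDLER(), which bundles the zero-rebased array index and the assignment into one designated initializer. Approximately (SIOCSIWCOMMIT is the first WE ioctl, so the two indices agree):

	/* removed driver-local helper */
	#define IW_IOCTL(x)		[(x) - SIOCSIWCOMMIT]
	/* shared helper, as defined (roughly) in include/net/iw_handler.h */
	#define IW_HANDLER(id, func)	[(id) - SIOCIWFIRST] = (func)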
10044enum { 10052enum {
@@ -11667,7 +11675,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11667 if (priv->prom_net_dev) 11675 if (priv->prom_net_dev)
11668 return -EPERM; 11676 return -EPERM;
11669 11677
11670 priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1); 11678 priv->prom_net_dev = alloc_libipw(sizeof(struct ipw_prom_priv), 1);
11671 if (priv->prom_net_dev == NULL) 11679 if (priv->prom_net_dev == NULL)
11672 return -ENOMEM; 11680 return -ENOMEM;
11673 11681
@@ -11686,7 +11694,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
11686 11694
11687 rc = register_netdev(priv->prom_net_dev); 11695 rc = register_netdev(priv->prom_net_dev);
11688 if (rc) { 11696 if (rc) {
11689 free_ieee80211(priv->prom_net_dev, 1); 11697 free_libipw(priv->prom_net_dev, 1);
11690 priv->prom_net_dev = NULL; 11698 priv->prom_net_dev = NULL;
11691 return rc; 11699 return rc;
11692 } 11700 }
@@ -11700,7 +11708,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
11700 return; 11708 return;
11701 11709
11702 unregister_netdev(priv->prom_net_dev); 11710 unregister_netdev(priv->prom_net_dev);
11703 free_ieee80211(priv->prom_net_dev, 1); 11711 free_libipw(priv->prom_net_dev, 1);
11704 11712
11705 priv->prom_net_dev = NULL; 11713 priv->prom_net_dev = NULL;
11706} 11714}
@@ -11728,7 +11736,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11728 struct ipw_priv *priv; 11736 struct ipw_priv *priv;
11729 int i; 11737 int i;
11730 11738
11731 net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0); 11739 net_dev = alloc_libipw(sizeof(struct ipw_priv), 0);
11732 if (net_dev == NULL) { 11740 if (net_dev == NULL) {
11733 err = -ENOMEM; 11741 err = -ENOMEM;
11734 goto out; 11742 goto out;
@@ -11748,7 +11756,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11748 mutex_init(&priv->mutex); 11756 mutex_init(&priv->mutex);
11749 if (pci_enable_device(pdev)) { 11757 if (pci_enable_device(pdev)) {
11750 err = -ENODEV; 11758 err = -ENODEV;
11751 goto out_free_ieee80211; 11759 goto out_free_libipw;
11752 } 11760 }
11753 11761
11754 pci_set_master(pdev); 11762 pci_set_master(pdev);
@@ -11875,8 +11883,8 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
11875 out_pci_disable_device: 11883 out_pci_disable_device:
11876 pci_disable_device(pdev); 11884 pci_disable_device(pdev);
11877 pci_set_drvdata(pdev, NULL); 11885 pci_set_drvdata(pdev, NULL);
11878 out_free_ieee80211: 11886 out_free_libipw:
11879 free_ieee80211(priv->net_dev, 0); 11887 free_libipw(priv->net_dev, 0);
11880 out: 11888 out:
11881 return err; 11889 return err;
11882} 11890}
@@ -11943,11 +11951,11 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
11943 pci_release_regions(pdev); 11951 pci_release_regions(pdev);
11944 pci_disable_device(pdev); 11952 pci_disable_device(pdev);
11945 pci_set_drvdata(pdev, NULL); 11953 pci_set_drvdata(pdev, NULL);
11946 /* wiphy_unregister needs to be here, before free_ieee80211 */ 11954 /* wiphy_unregister needs to be here, before free_libipw */
11947 wiphy_unregister(priv->ieee->wdev.wiphy); 11955 wiphy_unregister(priv->ieee->wdev.wiphy);
11948 kfree(priv->ieee->a_band.channels); 11956 kfree(priv->ieee->a_band.channels);
11949 kfree(priv->ieee->bg_band.channels); 11957 kfree(priv->ieee->bg_band.channels);
11950 free_ieee80211(priv->net_dev, 0); 11958 free_libipw(priv->net_dev, 0);
11951 free_firmware(); 11959 free_firmware();
11952} 11960}
11953 11961
diff --git a/drivers/net/wireless/ipw2x00/libipw.h b/drivers/net/wireless/ipw2x00/libipw.h
index a6d5e42647e4..284b0e4cb815 100644
--- a/drivers/net/wireless/ipw2x00/libipw.h
+++ b/drivers/net/wireless/ipw2x00/libipw.h
@@ -64,7 +64,7 @@
64extern u32 libipw_debug_level; 64extern u32 libipw_debug_level;
65#define LIBIPW_DEBUG(level, fmt, args...) \ 65#define LIBIPW_DEBUG(level, fmt, args...) \
66do { if (libipw_debug_level & (level)) \ 66do { if (libipw_debug_level & (level)) \
67 printk(KERN_DEBUG "ieee80211: %c %s " fmt, \ 67 printk(KERN_DEBUG "libipw: %c %s " fmt, \
68 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0) 68 in_interrupt() ? 'I' : 'U', __func__ , ## args); } while (0)
69static inline bool libipw_ratelimit_debug(u32 level) 69static inline bool libipw_ratelimit_debug(u32 level)
70{ 70{
@@ -116,8 +116,8 @@ static inline bool libipw_ratelimit_debug(u32 level)
116#define LIBIPW_DL_RX (1<<9) 116#define LIBIPW_DL_RX (1<<9)
117#define LIBIPW_DL_QOS (1<<31) 117#define LIBIPW_DL_QOS (1<<31)
118 118
119#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "ieee80211: " f, ## a) 119#define LIBIPW_ERROR(f, a...) printk(KERN_ERR "libipw: " f, ## a)
120#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "ieee80211: " f, ## a) 120#define LIBIPW_WARNING(f, a...) printk(KERN_WARNING "libipw: " f, ## a)
121#define LIBIPW_DEBUG_INFO(f, a...) LIBIPW_DEBUG(LIBIPW_DL_INFO, f, ## a) 121#define LIBIPW_DEBUG_INFO(f, a...) LIBIPW_DEBUG(LIBIPW_DL_INFO, f, ## a)
122 122
123#define LIBIPW_DEBUG_WX(f, a...) LIBIPW_DEBUG(LIBIPW_DL_WX, f, ## a) 123#define LIBIPW_DEBUG_WX(f, a...) LIBIPW_DEBUG(LIBIPW_DL_WX, f, ## a)
@@ -905,7 +905,7 @@ struct libipw_device {
905 struct libipw_reassoc_request * req); 905 struct libipw_reassoc_request * req);
906 906
907 /* This must be the last item so that it points to the data 907 /* This must be the last item so that it points to the data
908 * allocated beyond this structure by alloc_ieee80211 */ 908 * allocated beyond this structure by alloc_libipw */
909 u8 priv[0]; 909 u8 priv[0];
910}; 910};
911 911
@@ -1017,9 +1017,9 @@ static inline int libipw_is_cck_rate(u8 rate)
1017 return 0; 1017 return 0;
1018} 1018}
1019 1019
1020/* ieee80211.c */ 1020/* libipw.c */
1021extern void free_ieee80211(struct net_device *dev, int monitor); 1021extern void free_libipw(struct net_device *dev, int monitor);
1022extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor); 1022extern struct net_device *alloc_libipw(int sizeof_priv, int monitor);
1023extern int libipw_change_mtu(struct net_device *dev, int new_mtu); 1023extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
1024 1024
1025extern void libipw_networks_age(struct libipw_device *ieee, 1025extern void libipw_networks_age(struct libipw_device *ieee,
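With the prototypes renamed, the allocator pair finally matches the library that provides it; the calling convention is untouched. A minimal sketch of the pairing, where struct my_priv is a hypothetical stand-in for a driver's private state laid out after struct libipw_device, as the priv[0] comment above describes:

	struct my_priv { int placeholder; };	/* hypothetical */

	static int example_probe(void)		/* illustrative only */
	{
		struct net_device *dev;

		dev = alloc_libipw(sizeof(struct my_priv), 0);
		if (!dev)
			return -ENOMEM;
		/* register_netdev(dev), use it, unregister_netdev(dev) */
		free_libipw(dev, 0);	/* frees the embedded priv area too */
		return 0;
	}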
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 2fa55867bd8b..55965408ff3f 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -53,7 +53,7 @@
53#include "libipw.h" 53#include "libipw.h"
54 54
55#define DRV_DESCRIPTION "802.11 data/management/control stack" 55#define DRV_DESCRIPTION "802.11 data/management/control stack"
56#define DRV_NAME "ieee80211" 56#define DRV_NAME "libipw"
57#define DRV_VERSION LIBIPW_VERSION 57#define DRV_VERSION LIBIPW_VERSION
58#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>" 58#define DRV_COPYRIGHT "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
59 59
@@ -140,7 +140,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
140} 140}
141EXPORT_SYMBOL(libipw_change_mtu); 141EXPORT_SYMBOL(libipw_change_mtu);
142 142
143struct net_device *alloc_ieee80211(int sizeof_priv, int monitor) 143struct net_device *alloc_libipw(int sizeof_priv, int monitor)
144{ 144{
145 struct libipw_device *ieee; 145 struct libipw_device *ieee;
146 struct net_device *dev; 146 struct net_device *dev;
@@ -222,8 +222,9 @@ failed_free_netdev:
222failed: 222failed:
223 return NULL; 223 return NULL;
224} 224}
225EXPORT_SYMBOL(alloc_libipw);
225 226
226void free_ieee80211(struct net_device *dev, int monitor) 227void free_libipw(struct net_device *dev, int monitor)
227{ 228{
228 struct libipw_device *ieee = netdev_priv(dev); 229 struct libipw_device *ieee = netdev_priv(dev);
229 230
@@ -237,6 +238,7 @@ void free_ieee80211(struct net_device *dev, int monitor)
237 238
238 free_netdev(dev); 239 free_netdev(dev);
239} 240}
241EXPORT_SYMBOL(free_libipw);
240 242
241#ifdef CONFIG_LIBIPW_DEBUG 243#ifdef CONFIG_LIBIPW_DEBUG
242 244
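Besides the rename, the two hunks above move each EXPORT_SYMBOL() out of the file footer to sit directly beneath the function it exports, the usual kernel-style arrangement that keeps a symbol's module visibility readable at its definition:

	void free_libipw(struct net_device *dev, int monitor)
	{
		/* ... body as in the hunk ... */
		free_netdev(dev);
	}
	EXPORT_SYMBOL(free_libipw);	/* immediately after the definition */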
@@ -291,7 +293,7 @@ static int __init libipw_init(void)
291 struct proc_dir_entry *e; 293 struct proc_dir_entry *e;
292 294
293 libipw_debug_level = debug; 295 libipw_debug_level = debug;
294 libipw_proc = proc_mkdir(DRV_NAME, init_net.proc_net); 296 libipw_proc = proc_mkdir("ieee80211", init_net.proc_net);
295 if (libipw_proc == NULL) { 297 if (libipw_proc == NULL) {
296 LIBIPW_ERROR("Unable to create " DRV_NAME 298 LIBIPW_ERROR("Unable to create " DRV_NAME
297 " proc directory\n"); 299 " proc directory\n");
@@ -331,6 +333,3 @@ MODULE_PARM_DESC(debug, "debug output mask");
331 333
332module_exit(libipw_exit); 334module_exit(libipw_exit);
333module_init(libipw_init); 335module_init(libipw_init);
334
335EXPORT_SYMBOL(alloc_ieee80211);
336EXPORT_SYMBOL(free_ieee80211);
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 4e378faee650..a684a72eb6e9 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -9,7 +9,9 @@ CFLAGS_iwl-devtrace.o := -I$(src)
9 9
10# AGN 10# AGN
11obj-$(CONFIG_IWLAGN) += iwlagn.o 11obj-$(CONFIG_IWLAGN) += iwlagn.o
12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o 12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o
13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o
14iwlagn-objs += iwl-agn-lib.o
13 15
14iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 16iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
15iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 17iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 3bf2e6e9b2d9..9a0191a5ea35 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -42,8 +42,9 @@
42#include "iwl-core.h" 42#include "iwl-core.h"
43#include "iwl-io.h" 43#include "iwl-io.h"
44#include "iwl-sta.h" 44#include "iwl-sta.h"
45#include "iwl-agn.h"
45#include "iwl-helpers.h" 46#include "iwl-helpers.h"
46#include "iwl-5000-hw.h" 47#include "iwl-agn-hw.h"
47#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
48 49
49/* Highest firmware API version supported */ 50/* Highest firmware API version supported */
@@ -117,7 +118,7 @@ static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
117static int iwl1000_hw_set_hw_params(struct iwl_priv *priv) 118static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
118{ 119{
119 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 120 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
120 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) 121 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
121 priv->cfg->num_of_queues = 122 priv->cfg->num_of_queues =
122 priv->cfg->mod_params->num_of_queues; 123 priv->cfg->mod_params->num_of_queues;
123 124
@@ -125,13 +126,13 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
125 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 126 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
126 priv->hw_params.scd_bc_tbls_size = 127 priv->hw_params.scd_bc_tbls_size =
127 priv->cfg->num_of_queues * 128 priv->cfg->num_of_queues *
128 sizeof(struct iwl5000_scd_bc_tbl); 129 sizeof(struct iwlagn_scd_bc_tbl);
129 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 130 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
130 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 131 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
131 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 132 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
132 133
133 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE; 134 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
134 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE; 135 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
135 136
136 priv->hw_params.max_bsm_size = 0; 137 priv->hw_params.max_bsm_size = 0;
137 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 138 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -161,25 +162,25 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
161 162
162static struct iwl_lib_ops iwl1000_lib = { 163static struct iwl_lib_ops iwl1000_lib = {
163 .set_hw_params = iwl1000_hw_set_hw_params, 164 .set_hw_params = iwl1000_hw_set_hw_params,
164 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 165 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
165 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 166 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
166 .txq_set_sched = iwl5000_txq_set_sched, 167 .txq_set_sched = iwlagn_txq_set_sched,
167 .txq_agg_enable = iwl5000_txq_agg_enable, 168 .txq_agg_enable = iwlagn_txq_agg_enable,
168 .txq_agg_disable = iwl5000_txq_agg_disable, 169 .txq_agg_disable = iwlagn_txq_agg_disable,
169 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 170 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
170 .txq_free_tfd = iwl_hw_txq_free_tfd, 171 .txq_free_tfd = iwl_hw_txq_free_tfd,
171 .txq_init = iwl_hw_tx_queue_init, 172 .txq_init = iwl_hw_tx_queue_init,
172 .rx_handler_setup = iwl5000_rx_handler_setup, 173 .rx_handler_setup = iwlagn_rx_handler_setup,
173 .setup_deferred_work = iwl5000_setup_deferred_work, 174 .setup_deferred_work = iwlagn_setup_deferred_work,
174 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 175 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
175 .load_ucode = iwl5000_load_ucode, 176 .load_ucode = iwlagn_load_ucode,
176 .dump_nic_event_log = iwl_dump_nic_event_log, 177 .dump_nic_event_log = iwl_dump_nic_event_log,
177 .dump_nic_error_log = iwl_dump_nic_error_log, 178 .dump_nic_error_log = iwl_dump_nic_error_log,
178 .dump_csr = iwl_dump_csr, 179 .dump_csr = iwl_dump_csr,
179 .dump_fh = iwl_dump_fh, 180 .dump_fh = iwl_dump_fh,
180 .init_alive_start = iwl5000_init_alive_start, 181 .init_alive_start = iwlagn_init_alive_start,
181 .alive_notify = iwl5000_alive_notify, 182 .alive_notify = iwlagn_alive_notify,
182 .send_tx_power = iwl5000_send_tx_power, 183 .send_tx_power = iwlagn_send_tx_power,
183 .update_chain_flags = iwl_update_chain_flags, 184 .update_chain_flags = iwl_update_chain_flags,
184 .apm_ops = { 185 .apm_ops = {
185 .init = iwl_apm_init, 186 .init = iwl_apm_init,
@@ -189,40 +190,43 @@ static struct iwl_lib_ops iwl1000_lib = {
189 }, 190 },
190 .eeprom_ops = { 191 .eeprom_ops = {
191 .regulatory_bands = { 192 .regulatory_bands = {
192 EEPROM_5000_REG_BAND_1_CHANNELS, 193 EEPROM_REG_BAND_1_CHANNELS,
193 EEPROM_5000_REG_BAND_2_CHANNELS, 194 EEPROM_REG_BAND_2_CHANNELS,
194 EEPROM_5000_REG_BAND_3_CHANNELS, 195 EEPROM_REG_BAND_3_CHANNELS,
195 EEPROM_5000_REG_BAND_4_CHANNELS, 196 EEPROM_REG_BAND_4_CHANNELS,
196 EEPROM_5000_REG_BAND_5_CHANNELS, 197 EEPROM_REG_BAND_5_CHANNELS,
197 EEPROM_5000_REG_BAND_24_HT40_CHANNELS, 198 EEPROM_REG_BAND_24_HT40_CHANNELS,
198 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 199 EEPROM_REG_BAND_52_HT40_CHANNELS
199 }, 200 },
200 .verify_signature = iwlcore_eeprom_verify_signature, 201 .verify_signature = iwlcore_eeprom_verify_signature,
201 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 202 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
202 .release_semaphore = iwlcore_eeprom_release_semaphore, 203 .release_semaphore = iwlcore_eeprom_release_semaphore,
203 .calib_version = iwl5000_eeprom_calib_version, 204 .calib_version = iwlagn_eeprom_calib_version,
204 .query_addr = iwl5000_eeprom_query_addr, 205 .query_addr = iwlagn_eeprom_query_addr,
205 }, 206 },
206 .post_associate = iwl_post_associate, 207 .post_associate = iwl_post_associate,
207 .isr = iwl_isr_ict, 208 .isr = iwl_isr_ict,
208 .config_ap = iwl_config_ap, 209 .config_ap = iwl_config_ap,
209 .temp_ops = { 210 .temp_ops = {
210 .temperature = iwl5000_temperature, 211 .temperature = iwlagn_temperature,
211 .set_ct_kill = iwl1000_set_ct_threshold, 212 .set_ct_kill = iwl1000_set_ct_threshold,
212 }, 213 },
213 .add_bcast_station = iwl_add_bcast_station, 214 .add_bcast_station = iwl_add_bcast_station,
215 .recover_from_tx_stall = iwl_bg_monitor_recover,
216 .check_plcp_health = iwl_good_plcp_health,
217 .check_ack_health = iwl_good_ack_health,
214}; 218};
215 219
216static const struct iwl_ops iwl1000_ops = { 220static const struct iwl_ops iwl1000_ops = {
217 .ucode = &iwl5000_ucode, 221 .ucode = &iwlagn_ucode,
218 .lib = &iwl1000_lib, 222 .lib = &iwl1000_lib,
219 .hcmd = &iwl5000_hcmd, 223 .hcmd = &iwlagn_hcmd,
220 .utils = &iwl5000_hcmd_utils, 224 .utils = &iwlagn_hcmd_utils,
221 .led = &iwlagn_led_ops, 225 .led = &iwlagn_led_ops,
222}; 226};
223 227
224struct iwl_cfg iwl1000_bgn_cfg = { 228struct iwl_cfg iwl1000_bgn_cfg = {
225 .name = "1000 Series BGN", 229 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
226 .fw_name_pre = IWL1000_FW_PRE, 230 .fw_name_pre = IWL1000_FW_PRE,
227 .ucode_api_max = IWL1000_UCODE_API_MAX, 231 .ucode_api_max = IWL1000_UCODE_API_MAX,
228 .ucode_api_min = IWL1000_UCODE_API_MIN, 232 .ucode_api_min = IWL1000_UCODE_API_MIN,
@@ -230,10 +234,10 @@ struct iwl_cfg iwl1000_bgn_cfg = {
230 .ops = &iwl1000_ops, 234 .ops = &iwl1000_ops,
231 .eeprom_size = OTP_LOW_IMAGE_SIZE, 235 .eeprom_size = OTP_LOW_IMAGE_SIZE,
232 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 236 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
233 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 237 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
234 .num_of_queues = IWL50_NUM_QUEUES, 238 .num_of_queues = IWLAGN_NUM_QUEUES,
235 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 239 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
236 .mod_params = &iwl50_mod_params, 240 .mod_params = &iwlagn_mod_params,
237 .valid_tx_ant = ANT_A, 241 .valid_tx_ant = ANT_A,
238 .valid_rx_ant = ANT_AB, 242 .valid_rx_ant = ANT_AB,
239 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 243 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -248,10 +252,12 @@ struct iwl_cfg iwl1000_bgn_cfg = {
248 .support_ct_kill_exit = true, 252 .support_ct_kill_exit = true,
249 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 253 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
250 .chain_noise_scale = 1000, 254 .chain_noise_scale = 1000,
255 .monitor_recover_period = IWL_MONITORING_PERIOD,
256 .max_event_log_size = 128,
251}; 257};
252 258
253struct iwl_cfg iwl1000_bg_cfg = { 259struct iwl_cfg iwl1000_bg_cfg = {
254 .name = "1000 Series BG", 260 .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
255 .fw_name_pre = IWL1000_FW_PRE, 261 .fw_name_pre = IWL1000_FW_PRE,
256 .ucode_api_max = IWL1000_UCODE_API_MAX, 262 .ucode_api_max = IWL1000_UCODE_API_MAX,
257 .ucode_api_min = IWL1000_UCODE_API_MIN, 263 .ucode_api_min = IWL1000_UCODE_API_MIN,
@@ -259,10 +265,10 @@ struct iwl_cfg iwl1000_bg_cfg = {
259 .ops = &iwl1000_ops, 265 .ops = &iwl1000_ops,
260 .eeprom_size = OTP_LOW_IMAGE_SIZE, 266 .eeprom_size = OTP_LOW_IMAGE_SIZE,
261 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, 267 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
262 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 268 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
263 .num_of_queues = IWL50_NUM_QUEUES, 269 .num_of_queues = IWLAGN_NUM_QUEUES,
264 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 270 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
265 .mod_params = &iwl50_mod_params, 271 .mod_params = &iwlagn_mod_params,
266 .valid_tx_ant = ANT_A, 272 .valid_tx_ant = ANT_A,
267 .valid_rx_ant = ANT_AB, 273 .valid_rx_ant = ANT_AB,
268 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 274 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -276,6 +282,8 @@ struct iwl_cfg iwl1000_bg_cfg = {
276 .support_ct_kill_exit = true, 282 .support_ct_kill_exit = true,
277 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 283 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
278 .chain_noise_scale = 1000, 284 .chain_noise_scale = 1000,
285 .monitor_recover_period = IWL_MONITORING_PERIOD,
286 .max_event_log_size = 128,
279}; 287};
280 288
281MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); 289MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
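Two distinct changes run through the iwl-1000 glue above: every iwl5000_* helper becomes iwlagn_* (the 5000-series implementation is being promoted to the shared AGN base that the Makefile hunk starts assembling), and the device gains stall-recovery plumbing via three new lib ops plus the monitor_recover_period and max_event_log_size cfg fields. The code that consumes those fields is outside this patch; a hedged guess at the wiring, in which everything except the field and hook names taken from the hunks is assumed:

	/* assumed: a monitoring timer, re-armed each period, fires the
	 * new recover_from_tx_stall hook (iwl_bg_monitor_recover); the
	 * millisecond unit of the period is an assumption here */
	mod_timer(&priv->monitor_recover,
		  jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));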
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 3a876a8ece38..91bcb4e3cdfb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,13 +71,11 @@
71 71
72#include "iwl-eeprom.h" 72#include "iwl-eeprom.h"
73 73
74/* Time constants */
75#define SHORT_SLOT_TIME 9
76#define LONG_SLOT_TIME 20
77
78/* RSSI to dBm */ 74/* RSSI to dBm */
79#define IWL39_RSSI_OFFSET 95 75#define IWL39_RSSI_OFFSET 95
80 76
77#define IWL_DEFAULT_TX_POWER 0x0F
78
81/* 79/*
82 * EEPROM related constants, enums, and structures. 80 * EEPROM related constants, enums, and structures.
83 */ 81 */
@@ -228,7 +226,6 @@ struct iwl3945_eeprom {
228 226
229/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */ 227/* 4 DATA + 1 CMD. There are 2 HCCA queues that are not used. */
230#define IWL39_NUM_QUEUES 5 228#define IWL39_NUM_QUEUES 5
231#define IWL_NUM_SCAN_RATES (2)
232 229
233#define IWL_DEFAULT_TX_RETRY 15 230#define IWL_DEFAULT_TX_RETRY 15
234 231
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index 902c4d4293e9..32eb4709acac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -330,16 +330,25 @@ static void iwl3945_collect_tx_data(struct iwl3945_rs_sta *rs_sta,
330 330
331} 331}
332 332
333static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband, 333/*
334 struct ieee80211_sta *sta, void *priv_sta) 334 * Called after adding a new station to initialize rate scaling
335 */
336void iwl3945_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
335{ 337{
336 struct iwl3945_rs_sta *rs_sta = priv_sta; 338 struct ieee80211_hw *hw = priv->hw;
337 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 339 struct ieee80211_conf *conf = &priv->hw->conf;
340 struct iwl3945_sta_priv *psta;
341 struct iwl3945_rs_sta *rs_sta;
342 struct ieee80211_supported_band *sband;
338 int i; 343 int i;
339 344
340 IWL_DEBUG_RATE(priv, "enter\n"); 345 IWL_DEBUG_INFO(priv, "enter\n");
346 if (sta_id == priv->hw_params.bcast_sta_id)
347 goto out;
341 348
342 spin_lock_init(&rs_sta->lock); 349 psta = (struct iwl3945_sta_priv *) sta->drv_priv;
350 rs_sta = &psta->rs_sta;
351 sband = hw->wiphy->bands[conf->channel->band];
343 352
344 rs_sta->priv = priv; 353 rs_sta->priv = priv;
345 354
@@ -352,9 +361,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
352 rs_sta->last_flush = jiffies; 361 rs_sta->last_flush = jiffies;
353 rs_sta->flush_time = IWL_RATE_FLUSH; 362 rs_sta->flush_time = IWL_RATE_FLUSH;
354 rs_sta->last_tx_packets = 0; 363 rs_sta->last_tx_packets = 0;
355 rs_sta->ibss_sta_added = 0;
356 364
357 init_timer(&rs_sta->rate_scale_flush);
358 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta; 365 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
359 rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush; 366 rs_sta->rate_scale_flush.function = iwl3945_bg_rate_scale_flush;
360 367
@@ -373,16 +380,18 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
373 } 380 }
374 } 381 }
375 382
376 priv->sta_supp_rates = sta->supp_rates[sband->band]; 383 priv->_3945.sta_supp_rates = sta->supp_rates[sband->band];
377 /* For 5 GHz band it starts at IWL_FIRST_OFDM_RATE */ 384 /* For 5 GHz band it starts at IWL_FIRST_OFDM_RATE */
378 if (sband->band == IEEE80211_BAND_5GHZ) { 385 if (sband->band == IEEE80211_BAND_5GHZ) {
379 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 386 rs_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
380 priv->sta_supp_rates = priv->sta_supp_rates << 387 priv->_3945.sta_supp_rates = priv->_3945.sta_supp_rates <<
381 IWL_FIRST_OFDM_RATE; 388 IWL_FIRST_OFDM_RATE;
382 } 389 }
383 390
391out:
392 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
384 393
385 IWL_DEBUG_RATE(priv, "leave\n"); 394 IWL_DEBUG_INFO(priv, "leave\n");
386} 395}
387 396
388static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir) 397static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
@@ -406,6 +415,9 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
406 415
407 rs_sta = &psta->rs_sta; 416 rs_sta = &psta->rs_sta;
408 417
418 spin_lock_init(&rs_sta->lock);
419 init_timer(&rs_sta->rate_scale_flush);
420
409 IWL_DEBUG_RATE(priv, "leave\n"); 421 IWL_DEBUG_RATE(priv, "leave\n");
410 422
411 return rs_sta; 423 return rs_sta;
@@ -414,13 +426,14 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
414static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta, 426static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
415 void *priv_sta) 427 void *priv_sta)
416{ 428{
417 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 429 struct iwl3945_rs_sta *rs_sta = priv_sta;
418 struct iwl3945_rs_sta *rs_sta = &psta->rs_sta;
419 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
420 430
421 IWL_DEBUG_RATE(priv, "enter\n"); 431 /*
432 * Be careful not to use any members of iwl3945_rs_sta (like trying
433 * to use iwl_priv to print out debugging) since it may not be fully
434 * initialized at this point.
435 */
422 del_timer_sync(&rs_sta->rate_scale_flush); 436 del_timer_sync(&rs_sta->rate_scale_flush);
423 IWL_DEBUG_RATE(priv, "leave\n");
424} 437}
425 438
426 439
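The two rs_sta hunks above are one fix split across alloc and free. rs_alloc_sta() now initializes the spinlock and flush timer the moment the per-station area exists, and rs_free_sta() stops touching rs_sta->priv, which (per the new comment) may never have been filled in. Since del_timer_sync() on an initialized-but-never-armed timer is a harmless no-op, teardown becomes safe even for stations whose rate data was never set up by the driver:

	/* alloc: primitives ready, rs_sta->priv deliberately still NULL */
	spin_lock_init(&rs_sta->lock);
	init_timer(&rs_sta->rate_scale_flush);
	/* later, only if the driver ran iwl3945_rs_rate_init(): */
	rs_sta->priv = priv;
	/* free: unconditionally safe either way */
	del_timer_sync(&rs_sta->rate_scale_flush);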
@@ -459,6 +472,13 @@ static void rs_tx_status(void *priv_rate, struct ieee80211_supported_band *sband
459 return; 472 return;
460 } 473 }
461 474
475 /* Treat uninitialized rate scaling data same as non-existing. */
476 if (!rs_sta->priv) {
477 IWL_DEBUG_RATE(priv, "leave: STA priv data uninitialized!\n");
478 return;
479 }
480
481
462 rs_sta->tx_packets++; 482 rs_sta->tx_packets++;
463 483
464 scale_rate_index = first_index; 484 scale_rate_index = first_index;
@@ -626,14 +646,19 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
626 u32 fail_count; 646 u32 fail_count;
627 s8 scale_action = 0; 647 s8 scale_action = 0;
628 unsigned long flags; 648 unsigned long flags;
629 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
630 u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0; 649 u16 rate_mask = sta ? sta->supp_rates[sband->band] : 0;
631 s8 max_rate_idx = -1; 650 s8 max_rate_idx = -1;
632 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 651 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
633 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 652 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
634 653
635 IWL_DEBUG_RATE(priv, "enter\n"); 654 IWL_DEBUG_RATE(priv, "enter\n");
636 655
656 /* Treat uninitialized rate scaling data same as non-existing. */
657 if (rs_sta && !rs_sta->priv) {
658 IWL_DEBUG_RATE(priv, "Rate scaling information not initialized yet.\n");
659 priv_sta = NULL;
660 }
661
637 if (rate_control_send_low(sta, priv_sta, txrc)) 662 if (rate_control_send_low(sta, priv_sta, txrc))
638 return; 663 return;
639 664
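Both guarded paths above use the same sentinel: rs_sta->priv stays NULL until the driver's deferred rate initialization runs, so a NULL priv means the station's rate data should be treated as non-existent. rs_tx_status() simply returns; rs_get_rate() clears priv_sta, which steers mac80211 into its safe rate_control_send_low() fallback:

	/* uninitialized == non-existent */
	if (rs_sta && !rs_sta->priv)
		priv_sta = NULL;	/* rate_control_send_low() takes over */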
@@ -651,20 +676,6 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta,
651 if (sband->band == IEEE80211_BAND_5GHZ) 676 if (sband->band == IEEE80211_BAND_5GHZ)
652 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE; 677 rate_mask = rate_mask << IWL_FIRST_OFDM_RATE;
653 678
654 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
655 !rs_sta->ibss_sta_added) {
656 u8 sta_id = iwl_find_station(priv, hdr->addr1);
657
658 if (sta_id == IWL_INVALID_STATION) {
659 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
660 hdr->addr1);
661 sta_id = iwl_add_station(priv, hdr->addr1, false,
662 CMD_ASYNC, NULL);
663 }
664 if (sta_id != IWL_INVALID_STATION)
665 rs_sta->ibss_sta_added = 1;
666 }
667
668 spin_lock_irqsave(&rs_sta->lock, flags); 679 spin_lock_irqsave(&rs_sta->lock, flags);
669 680
670 /* for recent assoc, choose best rate regarding 681 /* for recent assoc, choose best rate regarding
@@ -884,12 +895,22 @@ static void iwl3945_remove_debugfs(void *priv, void *priv_sta)
884} 895}
885#endif 896#endif
886 897
898/*
899 * Initialization of rate scaling information is done by driver after
900 * the station is added. Since mac80211 calls this function before a
901 * station is added we ignore it.
902 */
903static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
904 struct ieee80211_sta *sta, void *priv_sta)
905{
906}
907
887static struct rate_control_ops rs_ops = { 908static struct rate_control_ops rs_ops = {
888 .module = NULL, 909 .module = NULL,
889 .name = RS_NAME, 910 .name = RS_NAME,
890 .tx_status = rs_tx_status, 911 .tx_status = rs_tx_status,
891 .get_rate = rs_get_rate, 912 .get_rate = rs_get_rate,
892 .rate_init = rs_rate_init, 913 .rate_init = rs_rate_init_stub,
893 .alloc = rs_alloc, 914 .alloc = rs_alloc,
894 .free = rs_free, 915 .free = rs_free,
895 .alloc_sta = rs_alloc_sta, 916 .alloc_sta = rs_alloc_sta,
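The empty rs_rate_init_stub() resolves an ordering problem spelled out in its comment: mac80211 fires .rate_init as soon as it creates a station, before the uCode station entry exists, so the real work moves into iwl3945_rs_rate_init(), which the driver calls itself once its station add completes (this is also what let the IBSS station-add code disappear from the hot rs_get_rate() path earlier in this file). An assumed call site, illustrative only:

	/* hypothetical driver-side completion path, not in this hunk */
	static void sta_add_done(struct iwl_priv *priv,
				 struct ieee80211_sta *sta, u8 sta_id)
	{
		/* uCode acked the station; rate data is now safe to set
		 * up, and IWL_STA_UCODE_INPROGRESS is cleared inside */
		iwl3945_rs_rate_init(priv, sta, sta_id);
	}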
@@ -900,7 +921,6 @@ static struct rate_control_ops rs_ops = {
900#endif 921#endif
901 922
902}; 923};
903
904void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id) 924void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
905{ 925{
906 struct iwl_priv *priv = hw->priv; 926 struct iwl_priv *priv = hw->priv;
@@ -917,6 +937,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
917 sta = ieee80211_find_sta(priv->vif, 937 sta = ieee80211_find_sta(priv->vif,
918 priv->stations[sta_id].sta.sta.addr); 938 priv->stations[sta_id].sta.sta.addr);
919 if (!sta) { 939 if (!sta) {
940 IWL_DEBUG_RATE(priv, "Unable to find station to initialize rate scaling.\n");
920 rcu_read_unlock(); 941 rcu_read_unlock();
921 return; 942 return;
922 } 943 }
@@ -947,7 +968,7 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
947 968
948 spin_unlock_irqrestore(&rs_sta->lock, flags); 969 spin_unlock_irqrestore(&rs_sta->lock, flags);
949 970
950 rssi = priv->last_rx_rssi; 971 rssi = priv->_3945.last_rx_rssi;
951 if (rssi == 0) 972 if (rssi == 0)
952 rssi = IWL_MIN_RSSI_VAL; 973 rssi = IWL_MIN_RSSI_VAL;
953 974
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 0728054a22d4..bde3b4cbab9d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -192,12 +192,12 @@ static int iwl3945_hwrate_to_plcp_idx(u8 plcp)
192} 192}
193 193
194#ifdef CONFIG_IWLWIFI_DEBUG 194#ifdef CONFIG_IWLWIFI_DEBUG
195#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 195#define TX_STATUS_ENTRY(x) case TX_3945_STATUS_FAIL_ ## x: return #x
196 196
197static const char *iwl3945_get_tx_fail_reason(u32 status) 197static const char *iwl3945_get_tx_fail_reason(u32 status)
198{ 198{
199 switch (status & TX_STATUS_MSK) { 199 switch (status & TX_STATUS_MSK) {
200 case TX_STATUS_SUCCESS: 200 case TX_3945_STATUS_SUCCESS:
201 return "SUCCESS"; 201 return "SUCCESS";
202 TX_STATUS_ENTRY(SHORT_LIMIT); 202 TX_STATUS_ENTRY(SHORT_LIMIT);
203 TX_STATUS_ENTRY(LONG_LIMIT); 203 TX_STATUS_ENTRY(LONG_LIMIT);
@@ -243,7 +243,7 @@ int iwl3945_rs_next_rate(struct iwl_priv *priv, int rate)
243 next_rate = IWL_RATE_6M_INDEX; 243 next_rate = IWL_RATE_6M_INDEX;
244 break; 244 break;
245 case IEEE80211_BAND_2GHZ: 245 case IEEE80211_BAND_2GHZ:
246 if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) && 246 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
247 iwl_is_associated(priv)) { 247 iwl_is_associated(priv)) {
248 if (rate == IWL_RATE_11M_INDEX) 248 if (rate == IWL_RATE_11M_INDEX)
249 next_rate = IWL_RATE_5M_INDEX; 249 next_rate = IWL_RATE_5M_INDEX;
@@ -360,7 +360,7 @@ void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
360 (int)sizeof(struct iwl3945_notif_statistics), 360 (int)sizeof(struct iwl3945_notif_statistics),
361 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 361 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
362 362
363 memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39)); 363 memcpy(&priv->_3945.statistics, pkt->u.raw, sizeof(priv->_3945.statistics));
364} 364}
365 365
366/****************************************************************************** 366/******************************************************************************
@@ -487,7 +487,7 @@ static void _iwl3945_dbg_report_frame(struct iwl_priv *priv,
487 * but you can hack it to show more, if you'd like to. */ 487 * but you can hack it to show more, if you'd like to. */
488 if (dataframe) 488 if (dataframe)
489 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, " 489 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
490 "len=%u, rssi=%d, chnl=%d, rate=%d, \n", 490 "len=%u, rssi=%d, chnl=%d, rate=%d,\n",
491 title, le16_to_cpu(fc), header->addr1[5], 491 title, le16_to_cpu(fc), header->addr1[5],
492 length, rssi, channel, rate); 492 length, rssi, channel, rate);
493 else { 493 else {
@@ -549,7 +549,6 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
549 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 549 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
550 u16 len = le16_to_cpu(rx_hdr->len); 550 u16 len = le16_to_cpu(rx_hdr->len);
551 struct sk_buff *skb; 551 struct sk_buff *skb;
552 int ret;
553 __le16 fc = hdr->frame_control; 552 __le16 fc = hdr->frame_control;
554 553
555 /* We received data from the HW, so stop the watchdog */ 554 /* We received data from the HW, so stop the watchdog */
@@ -566,9 +565,9 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
566 return; 565 return;
567 } 566 }
568 567
569 skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC); 568 skb = dev_alloc_skb(128);
570 if (!skb) { 569 if (!skb) {
571 IWL_ERR(priv, "alloc_skb failed\n"); 570 IWL_ERR(priv, "dev_alloc_skb failed\n");
572 return; 571 return;
573 } 572 }
574 573
@@ -577,37 +576,13 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
577 (struct ieee80211_hdr *)rxb_addr(rxb), 576 (struct ieee80211_hdr *)rxb_addr(rxb),
578 le32_to_cpu(rx_end->status), stats); 577 le32_to_cpu(rx_end->status), stats);
579 578
580 skb_reserve(skb, IWL_LINK_HDR_MAX);
581 skb_add_rx_frag(skb, 0, rxb->page, 579 skb_add_rx_frag(skb, 0, rxb->page,
582 (void *)rx_hdr->payload - (void *)pkt, len); 580 (void *)rx_hdr->payload - (void *)pkt, len);
583 581
584 /* mac80211 currently doesn't support paged SKBs. Convert to a
585 * linear SKB for management frames and for data frames that require
586 * software decryption or software defragmentation. */
587 if (ieee80211_is_mgmt(fc) ||
588 ieee80211_has_protected(fc) ||
589 ieee80211_has_morefrags(fc) ||
590 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
591 ret = skb_linearize(skb);
592 else
593 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
594 0 : -ENOMEM;
595
596 if (ret) {
597 kfree_skb(skb);
598 goto out;
599 }
600
601 /*
602 * XXX: We cannot touch the page and its virtual memory (pkt) after
603 * here. It might have already been freed by the above skb change.
604 */
605
606 iwl_update_stats(priv, false, fc, len); 582 iwl_update_stats(priv, false, fc, len);
607 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats)); 583 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
608 584
609 ieee80211_rx(priv->hw, skb); 585 ieee80211_rx(priv->hw, skb);
610 out:
611 priv->alloc_rxb_page--; 586 priv->alloc_rxb_page--;
612 rxb->page = NULL; 587 rxb->page = NULL;
613} 588}
@@ -623,9 +598,8 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
623 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); 598 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
624 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 599 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
625 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 600 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
626 int snr; 601 u16 rx_stats_sig_avg __maybe_unused = le16_to_cpu(rx_stats->sig_avg);
627 u16 rx_stats_sig_avg = le16_to_cpu(rx_stats->sig_avg); 602 u16 rx_stats_noise_diff __maybe_unused = le16_to_cpu(rx_stats->noise_diff);
628 u16 rx_stats_noise_diff = le16_to_cpu(rx_stats->noise_diff);
629 u8 network_packet; 603 u8 network_packet;
630 604
631 rx_status.flag = 0; 605 rx_status.flag = 0;
@@ -663,53 +637,29 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
663 /* Convert 3945's rssi indicator to dBm */ 637 /* Convert 3945's rssi indicator to dBm */
664 rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET; 638 rx_status.signal = rx_stats->rssi - IWL39_RSSI_OFFSET;
665 639
666 /* Set default noise value to -127 */ 640 IWL_DEBUG_STATS(priv, "Rssi %d sig_avg %d noise_diff %d\n",
667 if (priv->last_rx_noise == 0) 641 rx_status.signal, rx_stats_sig_avg,
668 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 642 rx_stats_noise_diff);
669
670 /* 3945 provides noise info for OFDM frames only.
671 * sig_avg and noise_diff are measured by the 3945's digital signal
672 * processor (DSP), and indicate linear levels of signal level and
673 * distortion/noise within the packet preamble after
674 * automatic gain control (AGC). sig_avg should stay fairly
675 * constant if the radio's AGC is working well.
676 * Since these values are linear (not dB or dBm), linear
677 * signal-to-noise ratio (SNR) is (sig_avg / noise_diff).
678 * Convert linear SNR to dB SNR, then subtract that from rssi dBm
679 * to obtain noise level in dBm.
680 * Calculate rx_status.signal (quality indicator in %) based on SNR. */
681 if (rx_stats_noise_diff) {
682 snr = rx_stats_sig_avg / rx_stats_noise_diff;
683 rx_status.noise = rx_status.signal -
684 iwl3945_calc_db_from_ratio(snr);
685 } else {
686 rx_status.noise = priv->last_rx_noise;
687 }
688
689
690 IWL_DEBUG_STATS(priv, "Rssi %d noise %d sig_avg %d noise_diff %d\n",
691 rx_status.signal, rx_status.noise,
692 rx_stats_sig_avg, rx_stats_noise_diff);
693 643
694 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 644 header = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
695 645
696 network_packet = iwl3945_is_network_packet(priv, header); 646 network_packet = iwl3945_is_network_packet(priv, header);
697 647
698 IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Noise:%u, Rate:%u\n", 648 IWL_DEBUG_STATS_LIMIT(priv, "[%c] %d RSSI:%d Signal:%u, Rate:%u\n",
699 network_packet ? '*' : ' ', 649 network_packet ? '*' : ' ',
700 le16_to_cpu(rx_hdr->channel), 650 le16_to_cpu(rx_hdr->channel),
701 rx_status.signal, rx_status.signal, 651 rx_status.signal, rx_status.signal,
702 rx_status.noise, rx_status.rate_idx); 652 rx_status.rate_idx);
703 653
704 /* Set "1" to report good data frames in groups of 100 */ 654 /* Set "1" to report good data frames in groups of 100 */
705 iwl3945_dbg_report_frame(priv, pkt, header, 1); 655 iwl3945_dbg_report_frame(priv, pkt, header, 1);
706 iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header); 656 iwl_dbg_log_rx_data_frame(priv, le16_to_cpu(rx_hdr->len), header);
707 657
708 if (network_packet) { 658 if (network_packet) {
709 priv->last_beacon_time = le32_to_cpu(rx_end->beacon_timestamp); 659 priv->_3945.last_beacon_time =
710 priv->last_tsf = le64_to_cpu(rx_end->timestamp); 660 le32_to_cpu(rx_end->beacon_timestamp);
711 priv->last_rx_rssi = rx_status.signal; 661 priv->_3945.last_tsf = le64_to_cpu(rx_end->timestamp);
712 priv->last_rx_noise = rx_status.noise; 662 priv->_3945.last_rx_rssi = rx_status.signal;
713 } 663 }
714 664
715 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status); 665 iwl3945_pass_packet_to_mac80211(priv, rxb, &rx_status);
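
The comment removed in the hunk above documents the math the 3945 used to estimate noise: sig_avg and noise_diff are linear DSP readings, so their quotient is a linear SNR; converting that ratio to dB and subtracting it from the RSSI (in dBm) yields a noise floor in dBm. A minimal standalone sketch of that arithmetic, using floating-point math in place of the driver's integer-only iwl3945_calc_db_from_ratio() and example readings that are assumptions, not captured data:

	#include <math.h>
	#include <stdio.h>

	/* Illustrative stand-in for iwl3945_calc_db_from_ratio():
	 * convert a linear power ratio to dB. */
	static int calc_db_from_ratio(int ratio)
	{
		return (int)(10.0 * log10((double)ratio));
	}

	int main(void)
	{
		int rssi_dbm = -60;	/* example RSSI, already offset-corrected */
		int sig_avg = 160;	/* example linear signal level from the DSP */
		int noise_diff = 10;	/* example linear noise/distortion level */
		int snr = sig_avg / noise_diff;			/* 16, linear */
		int noise_dbm = rssi_dbm - calc_db_from_ratio(snr);

		printf("noise estimate: %d dBm\n", noise_dbm);	/* -72 dBm */
		return 0;
	}
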
@@ -957,7 +907,7 @@ static int iwl3945_tx_reset(struct iwl_priv *priv)
957 iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005); 907 iwl_write_prph(priv, ALM_SCD_TXF5MF_REG, 0x000005);
958 908
959 iwl_write_direct32(priv, FH39_TSSR_CBB_BASE, 909 iwl_write_direct32(priv, FH39_TSSR_CBB_BASE,
960 priv->shared_phys); 910 priv->_3945.shared_phys);
961 911
962 iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG, 912 iwl_write_direct32(priv, FH39_TSSR_MSG_CONFIG,
963 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON | 913 FH39_TSSR_TX_MSG_CONFIG_REG_VAL_SNOOP_RD_TXPD_ON |
@@ -1049,7 +999,7 @@ static void iwl3945_nic_config(struct iwl_priv *priv)
1049 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id); 999 IWL_DEBUG_INFO(priv, "HW Revision ID = 0x%X\n", rev_id);
1050 1000
1051 if (rev_id & PCI_CFG_REV_ID_BIT_RTP) 1001 if (rev_id & PCI_CFG_REV_ID_BIT_RTP)
1052 IWL_DEBUG_INFO(priv, "RTP type \n"); 1002 IWL_DEBUG_INFO(priv, "RTP type\n");
1053 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) { 1003 else if (rev_id & PCI_CFG_REV_ID_BIT_BASIC_SKU) {
1054 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n"); 1004 IWL_DEBUG_INFO(priv, "3945 RADIO-MB type\n");
1055 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 1005 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
@@ -1607,7 +1557,7 @@ static int iwl3945_hw_reg_set_new_power(struct iwl_priv *priv,
1607 int power; 1557 int power;
1608 1558
1609 /* Get this chnlgrp's rate-to-max/clip-powers table */ 1559 /* Get this chnlgrp's rate-to-max/clip-powers table */
1610 clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers; 1560 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1611 1561
1612 /* Get this channel's rate-to-current-power settings table */ 1562 /* Get this channel's rate-to-current-power settings table */
1613 power_info = ch_info->power_info; 1563 power_info = ch_info->power_info;
@@ -1733,7 +1683,7 @@ static int iwl3945_hw_reg_comp_txpower_temp(struct iwl_priv *priv)
1733 } 1683 }
1734 1684
1735 /* Get this chnlgrp's rate-to-max/clip-powers table */ 1685 /* Get this chnlgrp's rate-to-max/clip-powers table */
1736 clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers; 1686 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
1737 1687
1738 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */ 1688 /* set scan tx power, 1Mbit for CCK, 6Mbit for OFDM */
1739 for (scan_tbl_index = 0; 1689 for (scan_tbl_index = 0;
@@ -1911,6 +1861,8 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1911 "configuration (%d).\n", rc); 1861 "configuration (%d).\n", rc);
1912 return rc; 1862 return rc;
1913 } 1863 }
1864 iwl_clear_ucode_stations(priv, false);
1865 iwl_restore_stations(priv);
1914 } 1866 }
1915 1867
1916 IWL_DEBUG_INFO(priv, "Sending RXON\n" 1868 IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -1941,7 +1893,10 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1941 1893
1942 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon)); 1894 memcpy(active_rxon, staging_rxon, sizeof(*active_rxon));
1943 1895
1944 iwl_clear_stations_table(priv); 1896 if (!new_assoc) {
1897 iwl_clear_ucode_stations(priv, false);
1898 iwl_restore_stations(priv);
1899 }
1945 1900
1946 /* If we issue a new RXON command which required a tune then we must 1901 /* If we issue a new RXON command which required a tune then we must
1947 * send a new TXPOWER command or we won't be able to Tx any frames */ 1902 * send a new TXPOWER command or we won't be able to Tx any frames */
@@ -1951,19 +1906,6 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
1951 return rc; 1906 return rc;
1952 } 1907 }
1953 1908
1954 /* Add the broadcast address so we can send broadcast frames */
1955 priv->cfg->ops->lib->add_bcast_station(priv);
1956
1957 /* If we have set the ASSOC_MSK and we are in BSS mode then
1958 * add the IWL_AP_ID to the station rate table */
1959 if (iwl_is_associated(priv) &&
1960 (priv->iw_mode == NL80211_IFTYPE_STATION))
1961 if (iwl_add_station(priv, priv->active_rxon.bssid_addr,
1962 true, CMD_SYNC, NULL) == IWL_INVALID_STATION) {
1963 IWL_ERR(priv, "Error adding AP address for transmit\n");
1964 return -EIO;
1965 }
1966
1967 /* Init the hardware's rate fallback order based on the band */ 1909 /* Init the hardware's rate fallback order based on the band */
1968 rc = iwl3945_init_hw_rate_table(priv); 1910 rc = iwl3945_init_hw_rate_table(priv);
1969 if (rc) { 1911 if (rc) {
@@ -1998,13 +1940,13 @@ void iwl3945_reg_txpower_periodic(struct iwl_priv *priv)
1998 1940
1999 reschedule: 1941 reschedule:
2000 queue_delayed_work(priv->workqueue, 1942 queue_delayed_work(priv->workqueue,
2001 &priv->thermal_periodic, REG_RECALIB_PERIOD * HZ); 1943 &priv->_3945.thermal_periodic, REG_RECALIB_PERIOD * HZ);
2002} 1944}
2003 1945
2004static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work) 1946static void iwl3945_bg_reg_txpower_periodic(struct work_struct *work)
2005{ 1947{
2006 struct iwl_priv *priv = container_of(work, struct iwl_priv, 1948 struct iwl_priv *priv = container_of(work, struct iwl_priv,
2007 thermal_periodic.work); 1949 _3945.thermal_periodic.work);
2008 1950
2009 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1951 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2010 return; 1952 return;
@@ -2140,7 +2082,7 @@ static void iwl3945_hw_reg_init_channel_groups(struct iwl_priv *priv)
2140 * power peaks, without too much distortion (clipping). 2082 * power peaks, without too much distortion (clipping).
2141 */ 2083 */
2142 /* we'll fill in this array with h/w max power levels */ 2084 /* we'll fill in this array with h/w max power levels */
2143 clip_pwrs = (s8 *) priv->clip39_groups[i].clip_powers; 2085 clip_pwrs = (s8 *) priv->_3945.clip_groups[i].clip_powers;
2144 2086
2145 /* divide factory saturation power by 2 to find -3dB level */ 2087 /* divide factory saturation power by 2 to find -3dB level */
2146 satur_pwr = (s8) (group->saturation_power >> 1); 2088 satur_pwr = (s8) (group->saturation_power >> 1);
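
The "divide factory saturation power by 2 to find -3dB level" step leans on the standard identity that halving a linear power is a drop of roughly 3 dB, since 10*log10(1/2) is about -3.01. A purely arithmetic illustration; the driver's own factory-calibrated power units are not modeled here:

	#include <math.h>
	#include <stdio.h>

	int main(void)
	{
		double full = 100.0;		/* any linear power value */
		double half = full / 2.0;

		/* halving linear power is always the same dB step */
		printf("%.2f dB\n", 10.0 * log10(half / full));	/* -3.01 dB */
		return 0;
	}
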
@@ -2224,7 +2166,7 @@ int iwl3945_txpower_set_from_eeprom(struct iwl_priv *priv)
2224 iwl3945_hw_reg_get_ch_grp_index(priv, ch_info); 2166 iwl3945_hw_reg_get_ch_grp_index(priv, ch_info);
2225 2167
2226 /* Get this chnlgrp's rate->max/clip-powers table */ 2168 /* Get this chnlgrp's rate->max/clip-powers table */
2227 clip_pwrs = priv->clip39_groups[ch_info->group_index].clip_powers; 2169 clip_pwrs = priv->_3945.clip_groups[ch_info->group_index].clip_powers;
2228 2170
2229 /* calculate power index *adjustment* value according to 2171 /* calculate power index *adjustment* value according to
2230 * diff between current temperature and factory temperature */ 2172 * diff between current temperature and factory temperature */
@@ -2332,7 +2274,7 @@ int iwl3945_hw_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq)
2332{ 2274{
2333 int txq_id = txq->q.id; 2275 int txq_id = txq->q.id;
2334 2276
2335 struct iwl3945_shared *shared_data = priv->shared_virt; 2277 struct iwl3945_shared *shared_data = priv->_3945.shared_virt;
2336 2278
2337 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr); 2279 shared_data->tx_base_ptr[txq_id] = cpu_to_le32((u32)txq->q.dma_addr);
2338 2280
@@ -2432,7 +2374,7 @@ int iwl3945_init_hw_rate_table(struct iwl_priv *priv)
2432 /* If an OFDM rate is used, have it fall back to the 2374 /* If an OFDM rate is used, have it fall back to the
2433 * 1M CCK rates */ 2375 * 1M CCK rates */
2434 2376
2435 if (!(priv->sta_supp_rates & IWL_OFDM_RATES_MASK) && 2377 if (!(priv->_3945.sta_supp_rates & IWL_OFDM_RATES_MASK) &&
2436 iwl_is_associated(priv)) { 2378 iwl_is_associated(priv)) {
2437 2379
2438 index = IWL_FIRST_CCK_RATE; 2380 index = IWL_FIRST_CCK_RATE;
@@ -2471,10 +2413,11 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2471 memset((void *)&priv->hw_params, 0, 2413 memset((void *)&priv->hw_params, 0,
2472 sizeof(struct iwl_hw_params)); 2414 sizeof(struct iwl_hw_params));
2473 2415
2474 priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev, 2416 priv->_3945.shared_virt =
2475 sizeof(struct iwl3945_shared), 2417 dma_alloc_coherent(&priv->pci_dev->dev,
2476 &priv->shared_phys, GFP_KERNEL); 2418 sizeof(struct iwl3945_shared),
2477 if (!priv->shared_virt) { 2419 &priv->_3945.shared_phys, GFP_KERNEL);
2420 if (!priv->_3945.shared_virt) {
2478 IWL_ERR(priv, "failed to allocate pci memory\n"); 2421 IWL_ERR(priv, "failed to allocate pci memory\n");
2479 mutex_unlock(&priv->mutex); 2422 mutex_unlock(&priv->mutex);
2480 return -ENOMEM; 2423 return -ENOMEM;
@@ -2537,13 +2480,13 @@ void iwl3945_hw_rx_handler_setup(struct iwl_priv *priv)
2537 2480
2538void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv) 2481void iwl3945_hw_setup_deferred_work(struct iwl_priv *priv)
2539{ 2482{
2540 INIT_DELAYED_WORK(&priv->thermal_periodic, 2483 INIT_DELAYED_WORK(&priv->_3945.thermal_periodic,
2541 iwl3945_bg_reg_txpower_periodic); 2484 iwl3945_bg_reg_txpower_periodic);
2542} 2485}
2543 2486
2544void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv) 2487void iwl3945_hw_cancel_deferred_work(struct iwl_priv *priv)
2545{ 2488{
2546 cancel_delayed_work(&priv->thermal_periodic); 2489 cancel_delayed_work(&priv->_3945.thermal_periodic);
2547} 2490}
2548 2491
2549/* check contents of special bootstrap uCode SRAM */ 2492/* check contents of special bootstrap uCode SRAM */
@@ -2827,6 +2770,8 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2827 .led_compensation = 64, 2770 .led_compensation = 64,
2828 .broken_powersave = true, 2771 .broken_powersave = true,
2829 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2772 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2773 .monitor_recover_period = IWL_MONITORING_PERIOD,
2774 .max_event_log_size = 512,
2830}; 2775};
2831 2776
2832static struct iwl_cfg iwl3945_abg_cfg = { 2777static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2845,6 +2790,8 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2845 .led_compensation = 64, 2790 .led_compensation = 64,
2846 .broken_powersave = true, 2791 .broken_powersave = true,
2847 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2792 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2793 .monitor_recover_period = IWL_MONITORING_PERIOD,
2794 .max_event_log_size = 512,
2848}; 2795};
2849 2796
2850DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { 2797DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 452dfd5456c6..b89219573b91 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -95,7 +95,6 @@ struct iwl3945_rs_sta {
95 u8 tgg; 95 u8 tgg;
96 u8 flush_pending; 96 u8 flush_pending;
97 u8 start_rate; 97 u8 start_rate;
98 u8 ibss_sta_added;
99 struct timer_list rate_scale_flush; 98 struct timer_list rate_scale_flush;
100 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945]; 99 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
101#ifdef CONFIG_MAC80211_DEBUGFS 100#ifdef CONFIG_MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index 67ef562e8db1..cd4b61ae25b7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -81,26 +81,6 @@
81 */ 81 */
82#define IWL49_FIRST_AMPDU_QUEUE 7 82#define IWL49_FIRST_AMPDU_QUEUE 7
83 83
84/* Time constants */
85#define SHORT_SLOT_TIME 9
86#define LONG_SLOT_TIME 20
87
88/* RSSI to dBm */
89#define IWL49_RSSI_OFFSET 44
90
91
92/* PCI registers */
93#define PCI_CFG_RETRY_TIMEOUT 0x041
94
95/* PCI register values */
96#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
97#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
98
99#define IWL_NUM_SCAN_RATES (2)
100
101#define IWL_DEFAULT_TX_RETRY 15
102
103
104/* Sizes and addresses for instruction and data memory (SRAM) in 84/* Sizes and addresses for instruction and data memory (SRAM) in
105 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ 85 * 4965's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
106#define IWL49_RTC_INST_LOWER_BOUND (0x000000) 86#define IWL49_RTC_INST_LOWER_BOUND (0x000000)
@@ -393,10 +373,6 @@ static inline int iwl4965_hw_valid_rtc_data_addr(u32 addr)
393 * location(s) in command (struct iwl4965_txpowertable_cmd). 373 * location(s) in command (struct iwl4965_txpowertable_cmd).
394 */ 374 */
395 375
396/* Limit range of txpower output target to be between these values */
397#define IWL_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm = 1 milliwatt */
398#define IWL_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
399
400/** 376/**
401 * When MIMO is used (2 transmitters operating simultaneously), driver should 377 * When MIMO is used (2 transmitters operating simultaneously), driver should
402 * limit each transmitter to deliver a max of 3 dB below the regulatory limit 378 * limit each transmitter to deliver a max of 3 dB below the regulatory limit
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 8972166386cb..2e3cda75f3ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -46,6 +46,7 @@
46#include "iwl-calib.h" 46#include "iwl-calib.h"
47#include "iwl-sta.h" 47#include "iwl-sta.h"
48#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn.h"
49 50
50static int iwl4965_send_tx_power(struct iwl_priv *priv); 51static int iwl4965_send_tx_power(struct iwl_priv *priv);
51static int iwl4965_hw_get_temperature(struct iwl_priv *priv); 52static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -60,14 +61,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
60#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode" 61#define _IWL4965_MODULE_FIRMWARE(api) IWL4965_FW_PRE #api ".ucode"
61#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api) 62#define IWL4965_MODULE_FIRMWARE(api) _IWL4965_MODULE_FIRMWARE(api)
62 63
63
64/* module parameters */
65static struct iwl_mod_params iwl4965_mod_params = {
66 .amsdu_size_8K = 1,
67 .restart_fw = 1,
68 /* the rest are 0 by default */
69};
70
71/* check contents of special bootstrap uCode SRAM */ 64/* check contents of special bootstrap uCode SRAM */
72static int iwl4965_verify_bsm(struct iwl_priv *priv) 65static int iwl4965_verify_bsm(struct iwl_priv *priv)
73{ 66{
@@ -417,7 +410,7 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
417 sizeof(cmd), &cmd); 410 sizeof(cmd), &cmd);
418 if (ret) 411 if (ret)
419 IWL_DEBUG_CALIB(priv, "fail sending cmd " 412 IWL_DEBUG_CALIB(priv, "fail sending cmd "
420 "REPLY_PHY_CALIBRATION_CMD \n"); 413 "REPLY_PHY_CALIBRATION_CMD\n");
421 414
422 /* TODO we might want recalculate 415 /* TODO we might want recalculate
423 * rx_chain in rxon cmd */ 416 * rx_chain in rxon cmd */
@@ -502,14 +495,14 @@ static void iwl4965_tx_queue_set_status(struct iwl_priv *priv,
502 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); 495 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
503} 496}
504 497
505static const u16 default_queue_to_tx_fifo[] = { 498static const s8 default_queue_to_tx_fifo[] = {
506 IWL_TX_FIFO_AC3, 499 IWL_TX_FIFO_VO,
507 IWL_TX_FIFO_AC2, 500 IWL_TX_FIFO_VI,
508 IWL_TX_FIFO_AC1, 501 IWL_TX_FIFO_BE,
509 IWL_TX_FIFO_AC0, 502 IWL_TX_FIFO_BK,
510 IWL49_CMD_FIFO_NUM, 503 IWL49_CMD_FIFO_NUM,
511 IWL_TX_FIFO_HCCA_1, 504 IWL_TX_FIFO_UNUSED,
512 IWL_TX_FIFO_HCCA_2 505 IWL_TX_FIFO_UNUSED,
513}; 506};
514 507
515static int iwl4965_alive_notify(struct iwl_priv *priv) 508static int iwl4965_alive_notify(struct iwl_priv *priv)
@@ -589,9 +582,15 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
589 /* reset to 0 to enable all the queue first */ 582 /* reset to 0 to enable all the queue first */
590 priv->txq_ctx_active_msk = 0; 583 priv->txq_ctx_active_msk = 0;
591 /* Map each Tx/cmd queue to its corresponding fifo */ 584 /* Map each Tx/cmd queue to its corresponding fifo */
585 BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
592 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) { 586 for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
593 int ac = default_queue_to_tx_fifo[i]; 587 int ac = default_queue_to_tx_fifo[i];
588
594 iwl_txq_ctx_activate(priv, i); 589 iwl_txq_ctx_activate(priv, i);
590
591 if (ac == IWL_TX_FIFO_UNUSED)
592 continue;
593
595 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 594 iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
596 } 595 }
597 596
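
The BUILD_BUG_ON() added above turns the queue-to-FIFO table's length into a compile-time invariant: if the array ever stops having exactly 7 entries, the build fails instead of the mapping silently drifting out of sync with the loop. A self-contained sketch of the idiom, using a local macro modeled on one classic in-kernel negative-array-size definition:

	/* Modeled on one classic kernel definition of BUILD_BUG_ON(): a true
	 * condition yields a negative array size, which cannot compile. */
	#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))
	#define MY_ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static const signed char queue_to_fifo[] = { 3, 2, 1, 0, 7, -1, -1 };

	void check_queue_table(void)
	{
		/* Compilation fails here if the table is not exactly 7 entries. */
		MY_BUILD_BUG_ON(MY_ARRAY_SIZE(queue_to_fifo) != 7);
	}
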
@@ -1613,19 +1612,19 @@ static int iwl4965_is_temp_calib_needed(struct iwl_priv *priv)
1613 1612
1614 /* get absolute value */ 1613 /* get absolute value */
1615 if (temp_diff < 0) { 1614 if (temp_diff < 0) {
1616 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d, \n", temp_diff); 1615 IWL_DEBUG_POWER(priv, "Getting cooler, delta %d\n", temp_diff);
1617 temp_diff = -temp_diff; 1616 temp_diff = -temp_diff;
1618 } else if (temp_diff == 0) 1617 } else if (temp_diff == 0)
1619 IWL_DEBUG_POWER(priv, "Same temp, \n"); 1618 IWL_DEBUG_POWER(priv, "Temperature unchanged\n");
1620 else 1619 else
1621 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d, \n", temp_diff); 1620 IWL_DEBUG_POWER(priv, "Getting warmer, delta %d\n", temp_diff);
1622 1621
1623 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) { 1622 if (temp_diff < IWL_TEMPERATURE_THRESHOLD) {
1624 IWL_DEBUG_POWER(priv, "Thermal txpower calib not needed\n"); 1623 IWL_DEBUG_POWER(priv, " => thermal txpower calib not needed\n");
1625 return 0; 1624 return 0;
1626 } 1625 }
1627 1626
1628 IWL_DEBUG_POWER(priv, "Thermal txpower calib needed\n"); 1627 IWL_DEBUG_POWER(priv, " => thermal txpower calib needed\n");
1629 1628
1630 return 1; 1629 return 1;
1631} 1630}
@@ -1874,7 +1873,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
1874 info->status.rates[0].count = tx_resp->failure_frame + 1; 1873 info->status.rates[0].count = tx_resp->failure_frame + 1;
1875 info->flags &= ~IEEE80211_TX_CTL_AMPDU; 1874 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1876 info->flags |= iwl_tx_status_to_mac80211(status); 1875 info->flags |= iwl_tx_status_to_mac80211(status);
1877 iwl_hwrate_to_tx_control(priv, rate_n_flags, info); 1876 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
1878 /* FIXME: code repetition end */ 1877 /* FIXME: code repetition end */
1879 1878
1880 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n", 1879 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
@@ -2014,7 +2013,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2014 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd); 2013 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
2015 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn " 2014 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim scd_ssn "
2016 "%d index %d\n", scd_ssn , index); 2015 "%d index %d\n", scd_ssn , index);
2017 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2016 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
2018 if (qc) 2017 if (qc)
2019 iwl_free_tfds_in_queue(priv, sta_id, 2018 iwl_free_tfds_in_queue(priv, sta_id,
2020 tid, freed); 2019 tid, freed);
@@ -2031,7 +2030,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2031 } else { 2030 } else {
2032 info->status.rates[0].count = tx_resp->failure_frame + 1; 2031 info->status.rates[0].count = tx_resp->failure_frame + 1;
2033 info->flags |= iwl_tx_status_to_mac80211(status); 2032 info->flags |= iwl_tx_status_to_mac80211(status);
2034 iwl_hwrate_to_tx_control(priv, 2033 iwlagn_hwrate_to_tx_control(priv,
2035 le32_to_cpu(tx_resp->rate_n_flags), 2034 le32_to_cpu(tx_resp->rate_n_flags),
2036 info); 2035 info);
2037 2036
@@ -2042,7 +2041,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2042 le32_to_cpu(tx_resp->rate_n_flags), 2041 le32_to_cpu(tx_resp->rate_n_flags),
2043 tx_resp->failure_frame); 2042 tx_resp->failure_frame);
2044 2043
2045 freed = iwl_tx_queue_reclaim(priv, txq_id, index); 2044 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
2046 if (qc && likely(sta_id != IWL_INVALID_STATION)) 2045 if (qc && likely(sta_id != IWL_INVALID_STATION))
2047 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 2046 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
2048 else if (sta_id == IWL_INVALID_STATION) 2047 else if (sta_id == IWL_INVALID_STATION)
@@ -2053,10 +2052,9 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2053 iwl_wake_queue(priv, txq_id); 2052 iwl_wake_queue(priv, txq_id);
2054 } 2053 }
2055 if (qc && likely(sta_id != IWL_INVALID_STATION)) 2054 if (qc && likely(sta_id != IWL_INVALID_STATION))
2056 iwl_txq_check_empty(priv, sta_id, tid, txq_id); 2055 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
2057 2056
2058 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK)) 2057 iwl_check_abort_status(priv, tx_resp->frame_count, status);
2059 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
2060} 2058}
2061 2059
2062static int iwl4965_calc_rssi(struct iwl_priv *priv, 2060static int iwl4965_calc_rssi(struct iwl_priv *priv,
@@ -2090,7 +2088,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
2090 2088
2091 /* dBm = max_rssi dB - agc dB - constant. 2089 /* dBm = max_rssi dB - agc dB - constant.
2092 * Higher AGC (higher radio gain) means lower signal. */ 2090 * Higher AGC (higher radio gain) means lower signal. */
2093 return max_rssi - agc - IWL49_RSSI_OFFSET; 2091 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
2094} 2092}
2095 2093
2096 2094
@@ -2098,7 +2096,7 @@ static int iwl4965_calc_rssi(struct iwl_priv *priv,
2098static void iwl4965_rx_handler_setup(struct iwl_priv *priv) 2096static void iwl4965_rx_handler_setup(struct iwl_priv *priv)
2099{ 2097{
2100 /* Legacy Rx frames */ 2098 /* Legacy Rx frames */
2101 priv->rx_handlers[REPLY_RX] = iwl_rx_reply_rx; 2099 priv->rx_handlers[REPLY_RX] = iwlagn_rx_reply_rx;
2102 /* Tx response */ 2100 /* Tx response */
2103 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx; 2101 priv->rx_handlers[REPLY_TX] = iwl4965_rx_reply_tx;
2104} 2102}
@@ -2184,6 +2182,7 @@ static struct iwl_lib_ops iwl4965_lib = {
2184 .load_ucode = iwl4965_load_bsm, 2182 .load_ucode = iwl4965_load_bsm,
2185 .dump_nic_event_log = iwl_dump_nic_event_log, 2183 .dump_nic_event_log = iwl_dump_nic_event_log,
2186 .dump_nic_error_log = iwl_dump_nic_error_log, 2184 .dump_nic_error_log = iwl_dump_nic_error_log,
2185 .dump_fh = iwl_dump_fh,
2187 .set_channel_switch = iwl4965_hw_channel_switch, 2186 .set_channel_switch = iwl4965_hw_channel_switch,
2188 .apm_ops = { 2187 .apm_ops = {
2189 .init = iwl_apm_init, 2188 .init = iwl_apm_init,
@@ -2217,6 +2216,7 @@ static struct iwl_lib_ops iwl4965_lib = {
2217 .set_ct_kill = iwl4965_set_ct_threshold, 2216 .set_ct_kill = iwl4965_set_ct_threshold,
2218 }, 2217 },
2219 .add_bcast_station = iwl_add_bcast_station, 2218 .add_bcast_station = iwl_add_bcast_station,
2219 .check_plcp_health = iwl_good_plcp_health,
2220}; 2220};
2221 2221
2222static const struct iwl_ops iwl4965_ops = { 2222static const struct iwl_ops iwl4965_ops = {
@@ -2228,7 +2228,7 @@ static const struct iwl_ops iwl4965_ops = {
2228}; 2228};
2229 2229
2230struct iwl_cfg iwl4965_agn_cfg = { 2230struct iwl_cfg iwl4965_agn_cfg = {
2231 .name = "4965AGN", 2231 .name = "Intel(R) Wireless WiFi Link 4965AGN",
2232 .fw_name_pre = IWL4965_FW_PRE, 2232 .fw_name_pre = IWL4965_FW_PRE,
2233 .ucode_api_max = IWL4965_UCODE_API_MAX, 2233 .ucode_api_max = IWL4965_UCODE_API_MAX,
2234 .ucode_api_min = IWL4965_UCODE_API_MIN, 2234 .ucode_api_min = IWL4965_UCODE_API_MIN,
@@ -2239,7 +2239,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2239 .ops = &iwl4965_ops, 2239 .ops = &iwl4965_ops,
2240 .num_of_queues = IWL49_NUM_QUEUES, 2240 .num_of_queues = IWL49_NUM_QUEUES,
2241 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES, 2241 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
2242 .mod_params = &iwl4965_mod_params, 2242 .mod_params = &iwlagn_mod_params,
2243 .valid_tx_ant = ANT_AB, 2243 .valid_tx_ant = ANT_AB,
2244 .valid_rx_ant = ANT_ABC, 2244 .valid_rx_ant = ANT_ABC,
2245 .pll_cfg_val = 0, 2245 .pll_cfg_val = 0,
@@ -2251,27 +2251,12 @@ struct iwl_cfg iwl4965_agn_cfg = {
2251 .led_compensation = 61, 2251 .led_compensation = 61,
2252 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2252 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2253 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2253 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2254 .monitor_recover_period = IWL_MONITORING_PERIOD,
2255 .temperature_kelvin = true,
2256 .off_channel_workaround = true,
2257 .max_event_log_size = 512,
2254}; 2258};
2255 2259
2256/* Module firmware */ 2260/* Module firmware */
2257MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); 2261MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
2258 2262
2259module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO);
2260MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
2261module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
2262MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
2263module_param_named(
2264 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO);
2265MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
2266
2267module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
2268MODULE_PARM_DESC(queues_num, "number of hw queues.");
2269/* 11n */
2270module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
2271MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
2272module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
2273 int, S_IRUGO);
2274MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
2275
2276module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO);
2277MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index 714e032f6217..146e6431ae95 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -68,25 +68,6 @@
68#ifndef __iwl_5000_hw_h__ 68#ifndef __iwl_5000_hw_h__
69#define __iwl_5000_hw_h__ 69#define __iwl_5000_hw_h__
70 70
71#define IWL50_RTC_INST_LOWER_BOUND (0x000000)
72#define IWL50_RTC_INST_UPPER_BOUND (0x020000)
73
74#define IWL50_RTC_DATA_LOWER_BOUND (0x800000)
75#define IWL50_RTC_DATA_UPPER_BOUND (0x80C000)
76
77#define IWL50_RTC_INST_SIZE (IWL50_RTC_INST_UPPER_BOUND - \
78 IWL50_RTC_INST_LOWER_BOUND)
79#define IWL50_RTC_DATA_SIZE (IWL50_RTC_DATA_UPPER_BOUND - \
80 IWL50_RTC_DATA_LOWER_BOUND)
81
82/* EEPROM */
83#define IWL_5000_EEPROM_IMG_SIZE 2048
84
85#define IWL50_CMD_FIFO_NUM 7
86#define IWL50_NUM_QUEUES 20
87#define IWL50_NUM_AMPDU_QUEUES 10
88#define IWL50_FIRST_AMPDU_QUEUE 10
89
90/* 5150 only */ 71/* 5150 only */
91#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5) 72#define IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF (-5)
92 73
@@ -103,19 +84,5 @@ static inline s32 iwl_temp_calib_to_offset(struct iwl_priv *priv)
103 return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF); 84 return (s32)(temperature - voltage / IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF);
104} 85}
105 86
106/* Fixed (non-configurable) rx data from phy */
107
108/**
109 * struct iwl5000_schedq_bc_tbl scheduler byte count table
110 * base physical address of iwl5000_shared
111 * is provided to SCD_DRAM_BASE_ADDR
112 * @tfd_offset 0-12 - tx command byte count
113 * 12-16 - station index
114 */
115struct iwl5000_scd_bc_tbl {
116 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
117} __attribute__ ((packed));
118
119
120#endif /* __iwl_5000_hw_h__ */ 87#endif /* __iwl_5000_hw_h__ */
121 88
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index e476acb53aa7..e967cfcac224 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -19,6 +19,7 @@
19 * file called LICENSE. 19 * file called LICENSE.
20 * 20 *
21 * Contact Information: 21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
22 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
23 * 24 *
24 *****************************************************************************/ 25 *****************************************************************************/
@@ -43,9 +44,10 @@
43#include "iwl-io.h" 44#include "iwl-io.h"
44#include "iwl-sta.h" 45#include "iwl-sta.h"
45#include "iwl-helpers.h" 46#include "iwl-helpers.h"
47#include "iwl-agn.h"
46#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn-hw.h"
47#include "iwl-5000-hw.h" 50#include "iwl-5000-hw.h"
48#include "iwl-6000-hw.h"
49 51
50/* Highest firmware API version supported */ 52/* Highest firmware API version supported */
51#define IWL5000_UCODE_API_MAX 2 53#define IWL5000_UCODE_API_MAX 2
@@ -63,18 +65,8 @@
63#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode" 65#define _IWL5150_MODULE_FIRMWARE(api) IWL5150_FW_PRE #api ".ucode"
64#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api) 66#define IWL5150_MODULE_FIRMWARE(api) _IWL5150_MODULE_FIRMWARE(api)
65 67
66static const u16 iwl5000_default_queue_to_tx_fifo[] = {
67 IWL_TX_FIFO_AC3,
68 IWL_TX_FIFO_AC2,
69 IWL_TX_FIFO_AC1,
70 IWL_TX_FIFO_AC0,
71 IWL50_CMD_FIFO_NUM,
72 IWL_TX_FIFO_HCCA_1,
73 IWL_TX_FIFO_HCCA_2
74};
75
76/* NIC configuration for 5000 series */ 68/* NIC configuration for 5000 series */
77void iwl5000_nic_config(struct iwl_priv *priv) 69static void iwl5000_nic_config(struct iwl_priv *priv)
78{ 70{
79 unsigned long flags; 71 unsigned long flags;
80 u16 radio_cfg; 72 u16 radio_cfg;
@@ -107,162 +99,6 @@ void iwl5000_nic_config(struct iwl_priv *priv)
107 spin_unlock_irqrestore(&priv->lock, flags); 99 spin_unlock_irqrestore(&priv->lock, flags);
108} 100}
109 101
110
111/*
112 * EEPROM
113 */
114static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
115{
116 u16 offset = 0;
117
118 if ((address & INDIRECT_ADDRESS) == 0)
119 return address;
120
121 switch (address & INDIRECT_TYPE_MSK) {
122 case INDIRECT_HOST:
123 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
124 break;
125 case INDIRECT_GENERAL:
126 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
127 break;
128 case INDIRECT_REGULATORY:
129 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
130 break;
131 case INDIRECT_CALIBRATION:
132 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
133 break;
134 case INDIRECT_PROCESS_ADJST:
135 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
136 break;
137 case INDIRECT_OTHERS:
138 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
139 break;
140 default:
141 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
142 address & INDIRECT_TYPE_MSK);
143 break;
144 }
145
146 /* translate the offset from words to byte */
147 return (address & ADDRESS_MSK) + (offset << 1);
148}
149
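
The removed eeprom_indirect_address() resolves two-level EEPROM addresses: when the indirect flag is set, a per-type "link" word read from the EEPROM supplies a base offset in 16-bit words, which is shifted left once (words to bytes) and added to the plain address bits. A simplified model with made-up flag/mask values and a stubbed link table; only the mechanics match the driver:

	#include <stdint.h>
	#include <stdio.h>

	#define INDIRECT_FLAG 0x00100000u	/* assumed indirect-address bit */
	#define TYPE_MSK      0x000F0000u	/* assumed indirect-type field */
	#define ADDR_MSK      0x0000FFFFu	/* assumed plain address bits */

	/* Stand-in for iwl_eeprom_query16() on the per-type link cell. */
	static uint16_t read_link_word(uint32_t address)
	{
		static const uint16_t links[16] = { [1] = 0x0200, [2] = 0x0300 };
		return links[(address & TYPE_MSK) >> 16];
	}

	static uint32_t resolve_address(uint32_t address)
	{
		if (!(address & INDIRECT_FLAG))
			return address;		/* already a direct byte offset */
		/* link word is an offset in 16-bit words; double it for bytes */
		return (address & ADDR_MSK) +
		       ((uint32_t)read_link_word(address) << 1);
	}

	int main(void)
	{
		/* type 1, in-section offset 0x24 -> 0x200*2 + 0x24 = 0x424 */
		printf("0x%x\n", resolve_address(INDIRECT_FLAG | 0x00010000u | 0x24u));
		return 0;
	}
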
150u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv)
151{
152 struct iwl_eeprom_calib_hdr {
153 u8 version;
154 u8 pa_type;
155 u16 voltage;
156 } *hdr;
157
158 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
159 EEPROM_5000_CALIB_ALL);
160 return hdr->version;
161
162}
163
164static void iwl5000_gain_computation(struct iwl_priv *priv,
165 u32 average_noise[NUM_RX_CHAINS],
166 u16 min_average_noise_antenna_i,
167 u32 min_average_noise,
168 u8 default_chain)
169{
170 int i;
171 s32 delta_g;
172 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
173
174 /*
175 * Find Gain Code for the chains based on "default chain"
176 */
177 for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
178 if ((data->disconn_array[i])) {
179 data->delta_gain_code[i] = 0;
180 continue;
181 }
182
183 delta_g = (priv->cfg->chain_noise_scale *
184 ((s32)average_noise[default_chain] -
185 (s32)average_noise[i])) / 1500;
186
187 /* bound gain by 2 bits value max, 3rd bit is sign */
188 data->delta_gain_code[i] =
189 min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
190
191 if (delta_g < 0)
192 /*
193 * set negative sign ...
194 * note to Intel developers: This is uCode API format,
195 * not the format of any internal device registers.
196 * Do not change this format for e.g. 6050 or similar
197 * devices. Change format only if more resolution
198 * (i.e. more than 2 bits magnitude) is needed.
199 */
200 data->delta_gain_code[i] |= (1 << 2);
201 }
202
203 IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
204 data->delta_gain_code[1], data->delta_gain_code[2]);
205
206 if (!data->radio_write) {
207 struct iwl_calib_chain_noise_gain_cmd cmd;
208
209 memset(&cmd, 0, sizeof(cmd));
210
211 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
212 cmd.hdr.first_group = 0;
213 cmd.hdr.groups_num = 1;
214 cmd.hdr.data_valid = 1;
215 cmd.delta_gain_1 = data->delta_gain_code[1];
216 cmd.delta_gain_2 = data->delta_gain_code[2];
217 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
218 sizeof(cmd), &cmd, NULL);
219
220 data->radio_write = 1;
221 data->state = IWL_CHAIN_NOISE_CALIBRATED;
222 }
223
224 data->chain_noise_a = 0;
225 data->chain_noise_b = 0;
226 data->chain_noise_c = 0;
227 data->chain_signal_a = 0;
228 data->chain_signal_b = 0;
229 data->chain_signal_c = 0;
230 data->beacon_count = 0;
231}
232
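
The gain step above compresses a signed correction into the uCode's 3-bit field: delta_g = chain_noise_scale * (noise of the default chain - noise of chain i) / 1500, the magnitude clamped to two bits, with bit 2 used as a sign flag rather than two's complement (as the original comment warns). A compact model of that encoding; the scale, noise figures, and the assumed clamp value of 3 are examples only:

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_DELTA_GAIN_CODE 3	/* assumed 2-bit magnitude limit */

	static unsigned encode_delta_gain(long scale, long noise_default,
					  long noise_i)
	{
		long delta_g = scale * (noise_default - noise_i) / 1500;
		unsigned code = labs(delta_g);

		if (code > MAX_DELTA_GAIN_CODE)
			code = MAX_DELTA_GAIN_CODE;
		if (delta_g < 0)
			code |= 1 << 2;	/* sign flag, not two's complement */
		return code;
	}

	int main(void)
	{
		/* chain i noisier than the default chain -> negative delta */
		printf("code = 0x%x\n", encode_delta_gain(1000, 3100, 4000)); /* 0x7 */
		return 0;
	}
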
233static void iwl5000_chain_noise_reset(struct iwl_priv *priv)
234{
235 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
236 int ret;
237
238 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
239 struct iwl_calib_chain_noise_reset_cmd cmd;
240 memset(&cmd, 0, sizeof(cmd));
241
242 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
243 cmd.hdr.first_group = 0;
244 cmd.hdr.groups_num = 1;
245 cmd.hdr.data_valid = 1;
246 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
247 sizeof(cmd), &cmd);
248 if (ret)
249 IWL_ERR(priv,
250 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
251 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
252 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
253 }
254}
255
256void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
257 __le32 *tx_flags)
258{
259 if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
260 (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
261 *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
262 else
263 *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
264}
265
266static struct iwl_sensitivity_ranges iwl5000_sensitivity = { 102static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
267 .min_nrg_cck = 95, 103 .min_nrg_cck = 95,
268 .max_nrg_cck = 0, /* not used, set to 0 */ 104 .max_nrg_cck = 0, /* not used, set to 0 */
@@ -314,14 +150,6 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
314 .nrg_th_cca = 62, 150 .nrg_th_cca = 62,
315}; 151};
316 152
317const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
318 size_t offset)
319{
320 u32 address = eeprom_indirect_address(priv, offset);
321 BUG_ON(address >= priv->cfg->eeprom_size);
322 return &priv->eeprom[address];
323}
324
325static void iwl5150_set_ct_threshold(struct iwl_priv *priv) 153static void iwl5150_set_ct_threshold(struct iwl_priv *priv)
326{ 154{
327 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF; 155 const s32 volt2temp_coef = IWL_5150_VOLTAGE_TO_TEMPERATURE_COEFF;
@@ -337,356 +165,10 @@ static void iwl5000_set_ct_threshold(struct iwl_priv *priv)
337 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY; 165 priv->hw_params.ct_kill_threshold = CT_KILL_THRESHOLD_LEGACY;
338} 166}
339 167
340/* 168static int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
341 * Calibration
342 */
343static int iwl5000_set_Xtal_calib(struct iwl_priv *priv)
344{
345 struct iwl_calib_xtal_freq_cmd cmd;
346 __le16 *xtal_calib =
347 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
348
349 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
350 cmd.hdr.first_group = 0;
351 cmd.hdr.groups_num = 1;
352 cmd.hdr.data_valid = 1;
353 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
354 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
355 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
356 (u8 *)&cmd, sizeof(cmd));
357}
358
359static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
360{
361 struct iwl_calib_cfg_cmd calib_cfg_cmd;
362 struct iwl_host_cmd cmd = {
363 .id = CALIBRATION_CFG_CMD,
364 .len = sizeof(struct iwl_calib_cfg_cmd),
365 .data = &calib_cfg_cmd,
366 };
367
368 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
369 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
370 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
371 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
372 calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
373
374 return iwl_send_cmd(priv, &cmd);
375}
376
377static void iwl5000_rx_calib_result(struct iwl_priv *priv,
378 struct iwl_rx_mem_buffer *rxb)
379{
380 struct iwl_rx_packet *pkt = rxb_addr(rxb);
381 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
382 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
383 int index;
384
385 /* reduce the size of the length field itself */
386 len -= 4;
387
388 /* Define the order in which the results will be sent to the runtime
389 * uCode. iwl_send_calib_results sends them consecutively, in index
390 * order, so we sort them here. */
391 switch (hdr->op_code) {
392 case IWL_PHY_CALIBRATE_DC_CMD:
393 index = IWL_CALIB_DC;
394 break;
395 case IWL_PHY_CALIBRATE_LO_CMD:
396 index = IWL_CALIB_LO;
397 break;
398 case IWL_PHY_CALIBRATE_TX_IQ_CMD:
399 index = IWL_CALIB_TX_IQ;
400 break;
401 case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
402 index = IWL_CALIB_TX_IQ_PERD;
403 break;
404 case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
405 index = IWL_CALIB_BASE_BAND;
406 break;
407 default:
408 IWL_ERR(priv, "Unknown calibration notification %d\n",
409 hdr->op_code);
410 return;
411 }
412 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
413}
414
415static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
416 struct iwl_rx_mem_buffer *rxb)
417{
418 IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
419 queue_work(priv->workqueue, &priv->restart);
420}
421
422/*
423 * ucode
424 */
425static int iwl5000_load_section(struct iwl_priv *priv, const char *name,
426 struct fw_desc *image, u32 dst_addr)
427{
428 dma_addr_t phy_addr = image->p_addr;
429 u32 byte_cnt = image->len;
430 int ret;
431
432 priv->ucode_write_complete = 0;
433
434 iwl_write_direct32(priv,
435 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
436 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
437
438 iwl_write_direct32(priv,
439 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
440
441 iwl_write_direct32(priv,
442 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
443 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
444
445 iwl_write_direct32(priv,
446 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
447 (iwl_get_dma_hi_addr(phy_addr)
448 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
449
450 iwl_write_direct32(priv,
451 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
452 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
453 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
454 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
455
456 iwl_write_direct32(priv,
457 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
458 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
459 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
460 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
461
462 IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
463 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
464 priv->ucode_write_complete, 5 * HZ);
465 if (ret == -ERESTARTSYS) {
466 IWL_ERR(priv, "Could not load the %s uCode section due "
467 "to interrupt\n", name);
468 return ret;
469 }
470 if (!ret) {
471 IWL_ERR(priv, "Could not load the %s uCode section\n",
472 name);
473 return -ETIMEDOUT;
474 }
475
476 return 0;
477}
478
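
iwl5000_load_section(), dropped here, follows a common kick-and-wait shape: clear a completion flag, program the DMA channel, then sleep on a wait queue until the interrupt handler sets the flag, distinguishing interruption by a signal (-ERESTARTSYS) from a 5-second timeout. A stripped-down sketch of that pattern with hypothetical device fields and a stubbed transfer-start helper:

	#include <linux/wait.h>
	#include <linux/jiffies.h>
	#include <linux/errno.h>

	struct my_dev {
		wait_queue_head_t wait;
		int write_complete;	/* set to 1 by the completion interrupt */
	};

	static void start_dma_transfer(struct my_dev *dev)
	{
		/* hypothetical: program source/destination, enable the channel */
	}

	static int load_section_and_wait(struct my_dev *dev)
	{
		int ret;

		dev->write_complete = 0;
		start_dma_transfer(dev);

		ret = wait_event_interruptible_timeout(dev->wait,
						       dev->write_complete, 5 * HZ);
		if (ret == -ERESTARTSYS)
			return ret;		/* interrupted by a signal */
		if (!ret)
			return -ETIMEDOUT;	/* no completion IRQ within 5 s */
		return 0;
	}
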
479static int iwl5000_load_given_ucode(struct iwl_priv *priv,
480 struct fw_desc *inst_image,
481 struct fw_desc *data_image)
482{
483 int ret = 0;
484
485 ret = iwl5000_load_section(priv, "INST", inst_image,
486 IWL50_RTC_INST_LOWER_BOUND);
487 if (ret)
488 return ret;
489
490 return iwl5000_load_section(priv, "DATA", data_image,
491 IWL50_RTC_DATA_LOWER_BOUND);
492}
493
494int iwl5000_load_ucode(struct iwl_priv *priv)
495{
496 int ret = 0;
497
498 /* check whether init ucode should be loaded, or rather runtime ucode */
499 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
500 IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
501 ret = iwl5000_load_given_ucode(priv,
502 &priv->ucode_init, &priv->ucode_init_data);
503 if (!ret) {
504 IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
505 priv->ucode_type = UCODE_INIT;
506 }
507 } else {
508 IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
509 "Loading runtime ucode...\n");
510 ret = iwl5000_load_given_ucode(priv,
511 &priv->ucode_code, &priv->ucode_data);
512 if (!ret) {
513 IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
514 priv->ucode_type = UCODE_RT;
515 }
516 }
517
518 return ret;
519}
520
521void iwl5000_init_alive_start(struct iwl_priv *priv)
522{
523 int ret = 0;
524
525 /* Check alive response for "valid" sign from uCode */
526 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
527 /* We had an error bringing up the hardware, so take it
528 * all the way back down so we can try again */
529 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
530 goto restart;
531 }
532
533 /* initialize uCode was loaded... verify inst image.
534 * This is a paranoid check, because we would not have gotten the
535 * "initialize" alive if code weren't properly loaded. */
536 if (iwl_verify_ucode(priv)) {
537 /* Runtime instruction load was bad;
538 * take it all the way back down so we can try again */
539 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
540 goto restart;
541 }
542
543 iwl_clear_stations_table(priv);
544 ret = priv->cfg->ops->lib->alive_notify(priv);
545 if (ret) {
546 IWL_WARN(priv,
547 "Could not complete ALIVE transition: %d\n", ret);
548 goto restart;
549 }
550
551 iwl5000_send_calib_cfg(priv);
552 return;
553
554restart:
555 /* real restart (first load init_ucode) */
556 queue_work(priv->workqueue, &priv->restart);
557}
558
559static void iwl5000_set_wr_ptrs(struct iwl_priv *priv,
560 int txq_id, u32 index)
561{
562 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
563 (index & 0xff) | (txq_id << 8));
564 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
565}
566
567static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
568 struct iwl_tx_queue *txq,
569 int tx_fifo_id, int scd_retry)
570{
571 int txq_id = txq->q.id;
572 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
573
574 iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
575 (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
576 (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
577 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
578 IWL50_SCD_QUEUE_STTS_REG_MSK);
579
580 txq->sched_retry = scd_retry;
581
582 IWL_DEBUG_INFO(priv, "%s %s Queue %d on AC %d\n",
583 active ? "Activate" : "Deactivate",
584 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
585}
586
587int iwl5000_alive_notify(struct iwl_priv *priv)
588{
589 u32 a;
590 unsigned long flags;
591 int i, chan;
592 u32 reg_val;
593
594 spin_lock_irqsave(&priv->lock, flags);
595
596 priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
597 a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
598 for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
599 a += 4)
600 iwl_write_targ_mem(priv, a, 0);
601 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
602 a += 4)
603 iwl_write_targ_mem(priv, a, 0);
604 for (; a < priv->scd_base_addr +
605 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
606 iwl_write_targ_mem(priv, a, 0);
607
608 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
609 priv->scd_bc_tbls.dma >> 10);
610
611 /* Enable DMA channel */
612 for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
613 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
614 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
615 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
616
617 /* Update FH chicken bits */
618 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
619 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
620 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
621
622 iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
623 IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
624 iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
625
626 /* initiate the queues */
627 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
628 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
629 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
630 iwl_write_targ_mem(priv, priv->scd_base_addr +
631 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
632 iwl_write_targ_mem(priv, priv->scd_base_addr +
633 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
634 sizeof(u32),
635 ((SCD_WIN_SIZE <<
636 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
637 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
638 ((SCD_FRAME_LIMIT <<
639 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
640 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
641 }
642
643 iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
644 IWL_MASK(0, priv->hw_params.max_txq_num));
645
646 /* Activate all Tx DMA/FIFO channels */
647 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
648
649 iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
650
651 /* make sure all queue are not stopped */
652 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
653 for (i = 0; i < 4; i++)
654 atomic_set(&priv->queue_stop_count[i], 0);
655
656 /* reset to 0 to enable all the queue first */
657 priv->txq_ctx_active_msk = 0;
658 /* map qos queues to fifos one-to-one */
659 for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
660 int ac = iwl5000_default_queue_to_tx_fifo[i];
661 iwl_txq_ctx_activate(priv, i);
662 iwl5000_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
663 }
664
665 /*
666 * TODO - need to initialize these queues and map them to FIFOs
667 * in the loop above, not only mark them as active. We do this
668 * because we want the first aggregation queue to be queue #10,
669 * while queues 8 and 9 are otherwise unused for now.
670 */
671 iwl_txq_ctx_activate(priv, 7);
672 iwl_txq_ctx_activate(priv, 8);
673 iwl_txq_ctx_activate(priv, 9);
674
675 spin_unlock_irqrestore(&priv->lock, flags);
676
677
678 iwl_send_wimax_coex(priv);
679
680 iwl5000_set_Xtal_calib(priv);
681 iwl_send_calib_results(priv);
682
683 return 0;
684}
685
686int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
687{ 169{
688 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 170 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
689 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) 171 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
690 priv->cfg->num_of_queues = 172 priv->cfg->num_of_queues =
691 priv->cfg->mod_params->num_of_queues; 173 priv->cfg->mod_params->num_of_queues;
692 174
@@ -694,13 +176,13 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
694 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 176 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
695 priv->hw_params.scd_bc_tbls_size = 177 priv->hw_params.scd_bc_tbls_size =
696 priv->cfg->num_of_queues * 178 priv->cfg->num_of_queues *
697 sizeof(struct iwl5000_scd_bc_tbl); 179 sizeof(struct iwlagn_scd_bc_tbl);
698 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 180 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
699 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 181 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
700 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 182 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
701 183
702 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE; 184 priv->hw_params.max_data_size = IWLAGN_RTC_DATA_SIZE;
703 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE; 185 priv->hw_params.max_inst_size = IWLAGN_RTC_INST_SIZE;
704 186
705 priv->hw_params.max_bsm_size = 0; 187 priv->hw_params.max_bsm_size = 0;
706 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 188 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -741,547 +223,6 @@ int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
741 return 0; 223 return 0;
742} 224}
743 225
744/**
745 * iwl5000_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
746 */
747void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
748 struct iwl_tx_queue *txq,
749 u16 byte_cnt)
750{
751 struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
752 int write_ptr = txq->q.write_ptr;
753 int txq_id = txq->q.id;
754 u8 sec_ctl = 0;
755 u8 sta_id = 0;
756 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
757 __le16 bc_ent;
758
759 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
760
761 if (txq_id != IWL_CMD_QUEUE_NUM) {
762 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
763 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
764
765 switch (sec_ctl & TX_CMD_SEC_MSK) {
766 case TX_CMD_SEC_CCM:
767 len += CCMP_MIC_LEN;
768 break;
769 case TX_CMD_SEC_TKIP:
770 len += TKIP_ICV_LEN;
771 break;
772 case TX_CMD_SEC_WEP:
773 len += WEP_IV_LEN + WEP_ICV_LEN;
774 break;
775 }
776 }
777
778 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
779
780 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
781
782 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
783 scd_bc_tbl[txq_id].
784 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
785}
786
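The mirrored store above deserves a word: the scheduler reads byte counts for
an entire window, which may wrap past the end of the ring, so the first
TFD_QUEUE_SIZE_BC_DUP entries are duplicated beyond TFD_QUEUE_SIZE_MAX. A
minimal sketch of that rule (the helper name is illustrative, not from the
patch; constants and fields follow it):

	static void bc_tbl_store(__le16 *tfd_offset, int ptr, __le16 bc_ent)
	{
		tfd_offset[ptr] = bc_ent;
		/* mirror so a window that wraps can be read linearly */
		if (ptr < TFD_QUEUE_SIZE_BC_DUP)
			tfd_offset[TFD_QUEUE_SIZE_MAX + ptr] = bc_ent;
	}
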
787void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
788 struct iwl_tx_queue *txq)
789{
790 struct iwl5000_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
791 int txq_id = txq->q.id;
792 int read_ptr = txq->q.read_ptr;
793 u8 sta_id = 0;
794 __le16 bc_ent;
795
796 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
797
798 if (txq_id != IWL_CMD_QUEUE_NUM)
799 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
800
801 bc_ent = cpu_to_le16(1 | (sta_id << 12));
802 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
803
804 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
805 scd_bc_tbl[txq_id].
806 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
807}
808
809static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
810 u16 txq_id)
811{
812 u32 tbl_dw_addr;
813 u32 tbl_dw;
814 u16 scd_q2ratid;
815
816 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
817
818 tbl_dw_addr = priv->scd_base_addr +
819 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
820
821 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
822
823 if (txq_id & 0x1)
824 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
825 else
826 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
827
828 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
829
830 return 0;
831}
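
Each 32-bit word in the translate table holds two 16-bit RA/TID entries, so
odd queue numbers select the high half-word. A compact sketch of the even/odd
packing used above (the helper is illustrative only; the masks are from the
patch):

	static u32 q2ratid_pack(u32 tbl_dw, u16 txq_id, u16 scd_q2ratid)
	{
		if (txq_id & 0x1)	/* odd queue: high 16 bits of the word */
			return (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
		/* even queue: low 16 bits */
		return scd_q2ratid | (tbl_dw & 0xFFFF0000);
	}
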
832static void iwl5000_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
833{
834 /* Simply stop the queue, but don't change any configuration;
835 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
836 iwl_write_prph(priv,
837 IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
838 (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
839 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
840}
841
842int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
843 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
844{
845 unsigned long flags;
846 u16 ra_tid;
847
848 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
849 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
850 <= txq_id)) {
851 IWL_WARN(priv,
852 "queue number out of range: %d, must be %d to %d\n",
853 txq_id, IWL50_FIRST_AMPDU_QUEUE,
854 IWL50_FIRST_AMPDU_QUEUE +
855 priv->cfg->num_of_ampdu_queues - 1);
856 return -EINVAL;
857 }
858
859 ra_tid = BUILD_RAxTID(sta_id, tid);
860
861 /* Modify device's station table to Tx this TID */
862 iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
863
864 spin_lock_irqsave(&priv->lock, flags);
865
866 /* Stop this Tx queue before configuring it */
867 iwl5000_tx_queue_stop_scheduler(priv, txq_id);
868
869 /* Map receiver-address / traffic-ID to this queue */
870 iwl5000_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
871
872 /* Set this queue as a chain-building queue */
873 iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id));
874
875 /* enable aggregations for the queue */
876 iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id));
877
878 /* Place first TFD at index corresponding to start sequence number.
879 * Assumes that ssn_idx is valid (!= 0xFFF) */
880 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
881 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
882 iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
883
884 /* Set up Tx window size and frame limit for this queue */
885 iwl_write_targ_mem(priv, priv->scd_base_addr +
886 IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
887 sizeof(u32),
888 ((SCD_WIN_SIZE <<
889 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
890 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
891 ((SCD_FRAME_LIMIT <<
892 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
893 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
894
895 iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
896
897 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
898 iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
899
900 spin_unlock_irqrestore(&priv->lock, flags);
901
902 return 0;
903}
904
905int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
906 u16 ssn_idx, u8 tx_fifo)
907{
908 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
909 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
910 <= txq_id)) {
911 IWL_ERR(priv,
912 "queue number out of range: %d, must be %d to %d\n",
913 txq_id, IWL50_FIRST_AMPDU_QUEUE,
914 IWL50_FIRST_AMPDU_QUEUE +
915 priv->cfg->num_of_ampdu_queues - 1);
916 return -EINVAL;
917 }
918
919 iwl5000_tx_queue_stop_scheduler(priv, txq_id);
920
921 iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));
922
923 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
924 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
925	/* assumes that ssn_idx is valid (!= 0xFFF) */
926 iwl5000_set_wr_ptrs(priv, txq_id, ssn_idx);
927
928 iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
929 iwl_txq_ctx_deactivate(priv, txq_id);
930 iwl5000_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
931
932 return 0;
933}
934
935u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
936{
937 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
938 struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
939 memcpy(addsta, cmd, size);
940	/* reserved in 5000 */
941 addsta->rate_n_flags = cpu_to_le16(0);
942 return size;
943}
944
945
946/*
947 * Activate/deactivate Tx DMA/FIFO channels according to the tx fifos mask;
948 * must be called under priv->lock and with mac access held
949 */
950void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask)
951{
952 iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
953}
954
955
956static inline u32 iwl5000_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
957{
958 return le32_to_cpup((__le32 *)&tx_resp->status +
959 tx_resp->frame_count) & MAX_SN;
960}
961
962static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
963 struct iwl_ht_agg *agg,
964 struct iwl5000_tx_resp *tx_resp,
965 int txq_id, u16 start_idx)
966{
967 u16 status;
968 struct agg_tx_status *frame_status = &tx_resp->status;
969 struct ieee80211_tx_info *info = NULL;
970 struct ieee80211_hdr *hdr = NULL;
971 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
972 int i, sh, idx;
973 u16 seq;
974
975 if (agg->wait_for_ba)
976 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
977
978 agg->frame_count = tx_resp->frame_count;
979 agg->start_idx = start_idx;
980 agg->rate_n_flags = rate_n_flags;
981 agg->bitmap = 0;
982
983 /* # frames attempted by Tx command */
984 if (agg->frame_count == 1) {
985 /* Only one frame was attempted; no block-ack will arrive */
986 status = le16_to_cpu(frame_status[0].status);
987 idx = start_idx;
988
989 /* FIXME: code repetition */
990 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
991 agg->frame_count, agg->start_idx, idx);
992
993 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
994 info->status.rates[0].count = tx_resp->failure_frame + 1;
995 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
996 info->flags |= iwl_tx_status_to_mac80211(status);
997 iwl_hwrate_to_tx_control(priv, rate_n_flags, info);
998
999 /* FIXME: code repetition end */
1000
1001 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
1002 status & 0xff, tx_resp->failure_frame);
1003 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
1004
1005 agg->wait_for_ba = 0;
1006 } else {
1007 /* Two or more frames were attempted; expect block-ack */
1008 u64 bitmap = 0;
1009 int start = agg->start_idx;
1010
1011 /* Construct bit-map of pending frames within Tx window */
1012 for (i = 0; i < agg->frame_count; i++) {
1013 u16 sc;
1014 status = le16_to_cpu(frame_status[i].status);
1015 seq = le16_to_cpu(frame_status[i].sequence);
1016 idx = SEQ_TO_INDEX(seq);
1017 txq_id = SEQ_TO_QUEUE(seq);
1018
1019 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
1020 AGG_TX_STATE_ABORT_MSK))
1021 continue;
1022
1023 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
1024 agg->frame_count, txq_id, idx);
1025
1026 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
1027 if (!hdr) {
1028 IWL_ERR(priv,
1029 "BUG_ON idx doesn't point to valid skb"
1030 " idx=%d, txq_id=%d\n", idx, txq_id);
1031 return -1;
1032 }
1033
1034 sc = le16_to_cpu(hdr->seq_ctrl);
1035 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
1036 IWL_ERR(priv,
1037 "BUG_ON idx doesn't match seq control"
1038 " idx=%d, seq_idx=%d, seq=%d\n",
1039 idx, SEQ_TO_SN(sc),
1040 hdr->seq_ctrl);
1041 return -1;
1042 }
1043
1044 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
1045 i, idx, SEQ_TO_SN(sc));
1046
1047 sh = idx - start;
1048 if (sh > 64) {
1049 sh = (start - idx) + 0xff;
1050 bitmap = bitmap << sh;
1051 sh = 0;
1052 start = idx;
1053 } else if (sh < -64)
1054 sh = 0xff - (start - idx);
1055 else if (sh < 0) {
1056 sh = start - idx;
1057 start = idx;
1058 bitmap = bitmap << sh;
1059 sh = 0;
1060 }
1061 bitmap |= 1ULL << sh;
1062 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
1063 start, (unsigned long long)bitmap);
1064 }
1065
1066 agg->bitmap = bitmap;
1067 agg->start_idx = start;
1068 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
1069 agg->frame_count, agg->start_idx,
1070 (unsigned long long)agg->bitmap);
1071
1072 if (bitmap)
1073 agg->wait_for_ba = 1;
1074 }
1075 return 0;
1076}
1077
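The shift handling above is the subtle part: frame indices live in a
256-entry sequence space but the block-ack bitmap is only 64 bits wide, so
the window is re-anchored whenever an index falls outside it. A condensed
sketch of just that normalization (the function name is illustrative; the
logic mirrors the loop body above):

	static void agg_bitmap_set(u64 *bitmap, int *start, int idx)
	{
		int sh = idx - *start;

		if (sh > 64) {			/* wrapped far ahead of the window */
			sh = (*start - idx) + 0xff;
			*bitmap <<= sh;
			sh = 0;
			*start = idx;
		} else if (sh < -64) {		/* wrapped behind the window */
			sh = 0xff - (*start - idx);
		} else if (sh < 0) {		/* idx precedes start: re-anchor */
			sh = *start - idx;
			*start = idx;
			*bitmap <<= sh;
			sh = 0;
		}
		*bitmap |= 1ULL << sh;
	}
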
1078static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1079 struct iwl_rx_mem_buffer *rxb)
1080{
1081 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1082 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1083 int txq_id = SEQ_TO_QUEUE(sequence);
1084 int index = SEQ_TO_INDEX(sequence);
1085 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1086 struct ieee80211_tx_info *info;
1087 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
1088 u32 status = le16_to_cpu(tx_resp->status.status);
1089 int tid;
1090 int sta_id;
1091 int freed;
1092
1093 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
1094 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
1095 "is out of range [0-%d] %d %d\n", txq_id,
1096 index, txq->q.n_bd, txq->q.write_ptr,
1097 txq->q.read_ptr);
1098 return;
1099 }
1100
1101 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
1102 memset(&info->status, 0, sizeof(info->status));
1103
1104 tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
1105 sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
1106
1107 if (txq->sched_retry) {
1108 const u32 scd_ssn = iwl5000_get_scd_ssn(tx_resp);
1109 struct iwl_ht_agg *agg = NULL;
1110
1111 agg = &priv->stations[sta_id].tid[tid].agg;
1112
1113 iwl5000_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
1114
1115 /* check if BAR is needed */
1116 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
1117 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1118
1119 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
1120 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
1121 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
1122 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
1123 scd_ssn , index, txq_id, txq->swq_id);
1124
1125 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1126 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1127
1128 if (priv->mac80211_registered &&
1129 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1130 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
1131 if (agg->state == IWL_AGG_OFF)
1132 iwl_wake_queue(priv, txq_id);
1133 else
1134 iwl_wake_queue(priv, txq->swq_id);
1135 }
1136 }
1137 } else {
1138 BUG_ON(txq_id != txq->swq_id);
1139
1140 info->status.rates[0].count = tx_resp->failure_frame + 1;
1141 info->flags |= iwl_tx_status_to_mac80211(status);
1142 iwl_hwrate_to_tx_control(priv,
1143 le32_to_cpu(tx_resp->rate_n_flags),
1144 info);
1145
1146 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
1147 "0x%x retries %d\n",
1148 txq_id,
1149 iwl_get_tx_fail_reason(status), status,
1150 le32_to_cpu(tx_resp->rate_n_flags),
1151 tx_resp->failure_frame);
1152
1153 freed = iwl_tx_queue_reclaim(priv, txq_id, index);
1154 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1155
1156 if (priv->mac80211_registered &&
1157 (iwl_queue_space(&txq->q) > txq->q.low_mark))
1158 iwl_wake_queue(priv, txq_id);
1159 }
1160
1161 iwl_txq_check_empty(priv, sta_id, tid, txq_id);
1162
1163 if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
1164 IWL_ERR(priv, "TODO: Implement Tx ABORT REQUIRED!!!\n");
1165}
1166
1167/* Currently 5000 is the superset of everything */
1168u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len)
1169{
1170 return len;
1171}
1172
1173void iwl5000_setup_deferred_work(struct iwl_priv *priv)
1174{
1175 /* in 5000 the tx power calibration is done in uCode */
1176 priv->disable_tx_power_cal = 1;
1177}
1178
1179void iwl5000_rx_handler_setup(struct iwl_priv *priv)
1180{
1181 /* init calibration handlers */
1182 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
1183 iwl5000_rx_calib_result;
1184 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
1185 iwl5000_rx_calib_complete;
1186 priv->rx_handlers[REPLY_TX] = iwl5000_rx_reply_tx;
1187}
1188
1189
1190int iwl5000_hw_valid_rtc_data_addr(u32 addr)
1191{
1192 return (addr >= IWL50_RTC_DATA_LOWER_BOUND) &&
1193 (addr < IWL50_RTC_DATA_UPPER_BOUND);
1194}
1195
1196static int iwl5000_send_rxon_assoc(struct iwl_priv *priv)
1197{
1198 int ret = 0;
1199 struct iwl5000_rxon_assoc_cmd rxon_assoc;
1200 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
1201 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
1202
1203 if ((rxon1->flags == rxon2->flags) &&
1204 (rxon1->filter_flags == rxon2->filter_flags) &&
1205 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
1206 (rxon1->ofdm_ht_single_stream_basic_rates ==
1207 rxon2->ofdm_ht_single_stream_basic_rates) &&
1208 (rxon1->ofdm_ht_dual_stream_basic_rates ==
1209 rxon2->ofdm_ht_dual_stream_basic_rates) &&
1210 (rxon1->ofdm_ht_triple_stream_basic_rates ==
1211 rxon2->ofdm_ht_triple_stream_basic_rates) &&
1212 (rxon1->acquisition_data == rxon2->acquisition_data) &&
1213 (rxon1->rx_chain == rxon2->rx_chain) &&
1214 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
1215 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
1216 return 0;
1217 }
1218
1219 rxon_assoc.flags = priv->staging_rxon.flags;
1220 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
1221 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
1222 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
1223 rxon_assoc.reserved1 = 0;
1224 rxon_assoc.reserved2 = 0;
1225 rxon_assoc.reserved3 = 0;
1226 rxon_assoc.ofdm_ht_single_stream_basic_rates =
1227 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
1228 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
1229 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
1230 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
1231 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
1232 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
1233 rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
1234
1235 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
1236 sizeof(rxon_assoc), &rxon_assoc, NULL);
1237 if (ret)
1238 return ret;
1239
1240 return ret;
1241}
1242int iwl5000_send_tx_power(struct iwl_priv *priv)
1243{
1244 struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
1245 u8 tx_ant_cfg_cmd;
1246
1247	/* limit is in half-dBm units, so multiply the dBm value by 2 */
1248 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
1249
1250 if (priv->tx_power_lmt_in_half_dbm &&
1251 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
1252		/*
1253		 * Newer devices that use the enhanced/extended tx power
1254		 * table in EEPROM store it in half-dBm format; the driver
1255		 * must convert to dBm before reporting to mac80211.
1256		 * That conversion can lose 1/2 dBm of resolution: the
1257		 * driver rounds up before reporting, which can push the
1258		 * tx power 1/2 dBm over the regulatory limit. Check
1259		 * here: if "tx_power_user_lmt" is higher than the EEPROM
1260		 * value (in half-dBm format), lower the tx power to the
1261		 * EEPROM limit.
1262		 */
1263 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
1264 }
1265 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
1266 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
1267
1268 if (IWL_UCODE_API(priv->ucode_ver) == 1)
1269 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
1270 else
1271 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
1272
1273 return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
1274 sizeof(tx_power_cmd), &tx_power_cmd,
1275 NULL);
1276}
1277
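To make the half-dBm handling above concrete: the user limit is kept in dBm,
the firmware command expects half-dBm, and the EEPROM limit (when present) is
already half-dBm. A hedged sketch of the conversion and clamp (variable names
follow the patch):

	s8 lmt = 2 * priv->tx_power_user_lmt;	/* e.g. 15 dBm becomes 30 */
	if (priv->tx_power_lmt_in_half_dbm &&
	    priv->tx_power_lmt_in_half_dbm < lmt)
		lmt = priv->tx_power_lmt_in_half_dbm;	/* clamp to EEPROM */
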
1278void iwl5000_temperature(struct iwl_priv *priv)
1279{
1280 /* store temperature from statistics (in Celsius) */
1281 priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
1282 iwl_tt_handler(priv);
1283}
1284
1285static void iwl5150_temperature(struct iwl_priv *priv) 226static void iwl5150_temperature(struct iwl_priv *priv)
1286{ 227{
1287 u32 vt = 0; 228 u32 vt = 0;
@@ -1294,100 +235,6 @@ static void iwl5150_temperature(struct iwl_priv *priv)
1294 iwl_tt_handler(priv); 235 iwl_tt_handler(priv);
1295} 236}
1296 237
1297/* Calc max signal level (dBm) among 3 possible receivers */
1298int iwl5000_calc_rssi(struct iwl_priv *priv,
1299 struct iwl_rx_phy_res *rx_resp)
1300{
1301	/* data from the PHY/DSP regarding signal strength, etc.;
1302	 * contents are always present, not configurable by the host
1303	 */
1304 struct iwl5000_non_cfg_phy *ncphy =
1305 (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
1306 u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
1307 u8 agc;
1308
1309 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
1310 agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
1311
1312 /* Find max rssi among 3 possible receivers.
1313 * These values are measured by the digital signal processor (DSP).
1314 * They should stay fairly constant even as the signal strength varies,
1315 * if the radio's automatic gain control (AGC) is working right.
1316 * AGC value (see below) will provide the "interesting" info.
1317 */
1318 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
1319 rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
1320 rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
1321 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
1322 rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
1323
1324 max_rssi = max_t(u32, rssi_a, rssi_b);
1325 max_rssi = max_t(u32, max_rssi, rssi_c);
1326
1327 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
1328 rssi_a, rssi_b, rssi_c, max_rssi, agc);
1329
1330 /* dBm = max_rssi dB - agc dB - constant.
1331 * Higher AGC (higher radio gain) means lower signal. */
1332 return max_rssi - agc - IWL49_RSSI_OFFSET;
1333}
1334
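A worked example of the formula in the comment above, assuming the 4965-era
constant IWL49_RSSI_OFFSET is 44 dB: with DSP readings rssi_a/b/c of 50/47/44
and an AGC of 48, max_rssi is 50 and the reported level is
50 - 48 - 44 = -42 dBm; a stronger signal (higher max_rssi, lower AGC) moves
the result toward zero.
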
1335static int iwl5000_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
1336{
1337 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
1338 .valid = cpu_to_le32(valid_tx_ant),
1339 };
1340
1341 if (IWL_UCODE_API(priv->ucode_ver) > 1) {
1342 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
1343 return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
1344 sizeof(struct iwl_tx_ant_config_cmd),
1345 &tx_ant_cmd);
1346 } else {
1347 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
1348 return -EOPNOTSUPP;
1349 }
1350}
1351
1352
1353#define IWL5000_UCODE_GET(item) \
1354static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\
1355 u32 api_ver) \
1356{ \
1357 if (api_ver <= 2) \
1358 return le32_to_cpu(ucode->u.v1.item); \
1359 return le32_to_cpu(ucode->u.v2.item); \
1360}
1361
1362static u32 iwl5000_ucode_get_header_size(u32 api_ver)
1363{
1364 if (api_ver <= 2)
1365 return UCODE_HEADER_SIZE(1);
1366 return UCODE_HEADER_SIZE(2);
1367}
1368
1369static u32 iwl5000_ucode_get_build(const struct iwl_ucode_header *ucode,
1370 u32 api_ver)
1371{
1372 if (api_ver <= 2)
1373 return 0;
1374 return le32_to_cpu(ucode->u.v2.build);
1375}
1376
1377static u8 *iwl5000_ucode_get_data(const struct iwl_ucode_header *ucode,
1378 u32 api_ver)
1379{
1380 if (api_ver <= 2)
1381 return (u8 *) ucode->u.v1.data;
1382 return (u8 *) ucode->u.v2.data;
1383}
1384
1385IWL5000_UCODE_GET(inst_size);
1386IWL5000_UCODE_GET(data_size);
1387IWL5000_UCODE_GET(init_size);
1388IWL5000_UCODE_GET(init_data_size);
1389IWL5000_UCODE_GET(boot_size);
1390
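For reference, each IWL5000_UCODE_GET(item) invocation above expands to a
v1/v2-dispatching accessor; for example, IWL5000_UCODE_GET(inst_size)
produces (expansion shown for illustration only):

	static u32 iwl5000_ucode_get_inst_size(const struct iwl_ucode_header *ucode,
					       u32 api_ver)
	{
		if (api_ver <= 2)
			return le32_to_cpu(ucode->u.v1.inst_size);
		return le32_to_cpu(ucode->u.v2.inst_size);
	}
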
1391static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel) 238static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1392{ 239{
1393 struct iwl5000_channel_switch_cmd cmd; 240 struct iwl5000_channel_switch_cmd cmd;
@@ -1420,54 +267,27 @@ static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1420 return iwl_send_cmd_sync(priv, &hcmd); 267 return iwl_send_cmd_sync(priv, &hcmd);
1421} 268}
1422 269
1423struct iwl_hcmd_ops iwl5000_hcmd = { 270static struct iwl_lib_ops iwl5000_lib = {
1424 .rxon_assoc = iwl5000_send_rxon_assoc,
1425 .commit_rxon = iwl_commit_rxon,
1426 .set_rxon_chain = iwl_set_rxon_chain,
1427 .set_tx_ant = iwl5000_send_tx_ant_config,
1428};
1429
1430struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
1431 .get_hcmd_size = iwl5000_get_hcmd_size,
1432 .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
1433 .gain_computation = iwl5000_gain_computation,
1434 .chain_noise_reset = iwl5000_chain_noise_reset,
1435 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
1436 .calc_rssi = iwl5000_calc_rssi,
1437};
1438
1439struct iwl_ucode_ops iwl5000_ucode = {
1440 .get_header_size = iwl5000_ucode_get_header_size,
1441 .get_build = iwl5000_ucode_get_build,
1442 .get_inst_size = iwl5000_ucode_get_inst_size,
1443 .get_data_size = iwl5000_ucode_get_data_size,
1444 .get_init_size = iwl5000_ucode_get_init_size,
1445 .get_init_data_size = iwl5000_ucode_get_init_data_size,
1446 .get_boot_size = iwl5000_ucode_get_boot_size,
1447 .get_data = iwl5000_ucode_get_data,
1448};
1449
1450struct iwl_lib_ops iwl5000_lib = {
1451 .set_hw_params = iwl5000_hw_set_hw_params, 271 .set_hw_params = iwl5000_hw_set_hw_params,
1452 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 272 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
1453 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 273 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
1454 .txq_set_sched = iwl5000_txq_set_sched, 274 .txq_set_sched = iwlagn_txq_set_sched,
1455 .txq_agg_enable = iwl5000_txq_agg_enable, 275 .txq_agg_enable = iwlagn_txq_agg_enable,
1456 .txq_agg_disable = iwl5000_txq_agg_disable, 276 .txq_agg_disable = iwlagn_txq_agg_disable,
1457 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 277 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
1458 .txq_free_tfd = iwl_hw_txq_free_tfd, 278 .txq_free_tfd = iwl_hw_txq_free_tfd,
1459 .txq_init = iwl_hw_tx_queue_init, 279 .txq_init = iwl_hw_tx_queue_init,
1460 .rx_handler_setup = iwl5000_rx_handler_setup, 280 .rx_handler_setup = iwlagn_rx_handler_setup,
1461 .setup_deferred_work = iwl5000_setup_deferred_work, 281 .setup_deferred_work = iwlagn_setup_deferred_work,
1462 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 282 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
1463 .dump_nic_event_log = iwl_dump_nic_event_log, 283 .dump_nic_event_log = iwl_dump_nic_event_log,
1464 .dump_nic_error_log = iwl_dump_nic_error_log, 284 .dump_nic_error_log = iwl_dump_nic_error_log,
1465 .dump_csr = iwl_dump_csr, 285 .dump_csr = iwl_dump_csr,
1466 .dump_fh = iwl_dump_fh, 286 .dump_fh = iwl_dump_fh,
1467 .load_ucode = iwl5000_load_ucode, 287 .load_ucode = iwlagn_load_ucode,
1468 .init_alive_start = iwl5000_init_alive_start, 288 .init_alive_start = iwlagn_init_alive_start,
1469 .alive_notify = iwl5000_alive_notify, 289 .alive_notify = iwlagn_alive_notify,
1470 .send_tx_power = iwl5000_send_tx_power, 290 .send_tx_power = iwlagn_send_tx_power,
1471 .update_chain_flags = iwl_update_chain_flags, 291 .update_chain_flags = iwl_update_chain_flags,
1472 .set_channel_switch = iwl5000_hw_channel_switch, 292 .set_channel_switch = iwl5000_hw_channel_switch,
1473 .apm_ops = { 293 .apm_ops = {
@@ -1478,50 +298,53 @@ struct iwl_lib_ops iwl5000_lib = {
1478 }, 298 },
1479 .eeprom_ops = { 299 .eeprom_ops = {
1480 .regulatory_bands = { 300 .regulatory_bands = {
1481 EEPROM_5000_REG_BAND_1_CHANNELS, 301 EEPROM_REG_BAND_1_CHANNELS,
1482 EEPROM_5000_REG_BAND_2_CHANNELS, 302 EEPROM_REG_BAND_2_CHANNELS,
1483 EEPROM_5000_REG_BAND_3_CHANNELS, 303 EEPROM_REG_BAND_3_CHANNELS,
1484 EEPROM_5000_REG_BAND_4_CHANNELS, 304 EEPROM_REG_BAND_4_CHANNELS,
1485 EEPROM_5000_REG_BAND_5_CHANNELS, 305 EEPROM_REG_BAND_5_CHANNELS,
1486 EEPROM_5000_REG_BAND_24_HT40_CHANNELS, 306 EEPROM_REG_BAND_24_HT40_CHANNELS,
1487 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 307 EEPROM_REG_BAND_52_HT40_CHANNELS
1488 }, 308 },
1489 .verify_signature = iwlcore_eeprom_verify_signature, 309 .verify_signature = iwlcore_eeprom_verify_signature,
1490 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 310 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
1491 .release_semaphore = iwlcore_eeprom_release_semaphore, 311 .release_semaphore = iwlcore_eeprom_release_semaphore,
1492 .calib_version = iwl5000_eeprom_calib_version, 312 .calib_version = iwlagn_eeprom_calib_version,
1493 .query_addr = iwl5000_eeprom_query_addr, 313 .query_addr = iwlagn_eeprom_query_addr,
1494 }, 314 },
1495 .post_associate = iwl_post_associate, 315 .post_associate = iwl_post_associate,
1496 .isr = iwl_isr_ict, 316 .isr = iwl_isr_ict,
1497 .config_ap = iwl_config_ap, 317 .config_ap = iwl_config_ap,
1498 .temp_ops = { 318 .temp_ops = {
1499 .temperature = iwl5000_temperature, 319 .temperature = iwlagn_temperature,
1500 .set_ct_kill = iwl5000_set_ct_threshold, 320 .set_ct_kill = iwl5000_set_ct_threshold,
1501 }, 321 },
1502 .add_bcast_station = iwl_add_bcast_station, 322 .add_bcast_station = iwl_add_bcast_station,
323 .recover_from_tx_stall = iwl_bg_monitor_recover,
324 .check_plcp_health = iwl_good_plcp_health,
325 .check_ack_health = iwl_good_ack_health,
1503}; 326};
1504 327
1505static struct iwl_lib_ops iwl5150_lib = { 328static struct iwl_lib_ops iwl5150_lib = {
1506 .set_hw_params = iwl5000_hw_set_hw_params, 329 .set_hw_params = iwl5000_hw_set_hw_params,
1507 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 330 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
1508 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 331 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
1509 .txq_set_sched = iwl5000_txq_set_sched, 332 .txq_set_sched = iwlagn_txq_set_sched,
1510 .txq_agg_enable = iwl5000_txq_agg_enable, 333 .txq_agg_enable = iwlagn_txq_agg_enable,
1511 .txq_agg_disable = iwl5000_txq_agg_disable, 334 .txq_agg_disable = iwlagn_txq_agg_disable,
1512 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 335 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
1513 .txq_free_tfd = iwl_hw_txq_free_tfd, 336 .txq_free_tfd = iwl_hw_txq_free_tfd,
1514 .txq_init = iwl_hw_tx_queue_init, 337 .txq_init = iwl_hw_tx_queue_init,
1515 .rx_handler_setup = iwl5000_rx_handler_setup, 338 .rx_handler_setup = iwlagn_rx_handler_setup,
1516 .setup_deferred_work = iwl5000_setup_deferred_work, 339 .setup_deferred_work = iwlagn_setup_deferred_work,
1517 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 340 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
1518 .dump_nic_event_log = iwl_dump_nic_event_log, 341 .dump_nic_event_log = iwl_dump_nic_event_log,
1519 .dump_nic_error_log = iwl_dump_nic_error_log, 342 .dump_nic_error_log = iwl_dump_nic_error_log,
1520 .dump_csr = iwl_dump_csr, 343 .dump_csr = iwl_dump_csr,
1521 .load_ucode = iwl5000_load_ucode, 344 .load_ucode = iwlagn_load_ucode,
1522 .init_alive_start = iwl5000_init_alive_start, 345 .init_alive_start = iwlagn_init_alive_start,
1523 .alive_notify = iwl5000_alive_notify, 346 .alive_notify = iwlagn_alive_notify,
1524 .send_tx_power = iwl5000_send_tx_power, 347 .send_tx_power = iwlagn_send_tx_power,
1525 .update_chain_flags = iwl_update_chain_flags, 348 .update_chain_flags = iwl_update_chain_flags,
1526 .set_channel_switch = iwl5000_hw_channel_switch, 349 .set_channel_switch = iwl5000_hw_channel_switch,
1527 .apm_ops = { 350 .apm_ops = {
@@ -1532,19 +355,19 @@ static struct iwl_lib_ops iwl5150_lib = {
1532 }, 355 },
1533 .eeprom_ops = { 356 .eeprom_ops = {
1534 .regulatory_bands = { 357 .regulatory_bands = {
1535 EEPROM_5000_REG_BAND_1_CHANNELS, 358 EEPROM_REG_BAND_1_CHANNELS,
1536 EEPROM_5000_REG_BAND_2_CHANNELS, 359 EEPROM_REG_BAND_2_CHANNELS,
1537 EEPROM_5000_REG_BAND_3_CHANNELS, 360 EEPROM_REG_BAND_3_CHANNELS,
1538 EEPROM_5000_REG_BAND_4_CHANNELS, 361 EEPROM_REG_BAND_4_CHANNELS,
1539 EEPROM_5000_REG_BAND_5_CHANNELS, 362 EEPROM_REG_BAND_5_CHANNELS,
1540 EEPROM_5000_REG_BAND_24_HT40_CHANNELS, 363 EEPROM_REG_BAND_24_HT40_CHANNELS,
1541 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 364 EEPROM_REG_BAND_52_HT40_CHANNELS
1542 }, 365 },
1543 .verify_signature = iwlcore_eeprom_verify_signature, 366 .verify_signature = iwlcore_eeprom_verify_signature,
1544 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 367 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
1545 .release_semaphore = iwlcore_eeprom_release_semaphore, 368 .release_semaphore = iwlcore_eeprom_release_semaphore,
1546 .calib_version = iwl5000_eeprom_calib_version, 369 .calib_version = iwlagn_eeprom_calib_version,
1547 .query_addr = iwl5000_eeprom_query_addr, 370 .query_addr = iwlagn_eeprom_query_addr,
1548 }, 371 },
1549 .post_associate = iwl_post_associate, 372 .post_associate = iwl_post_associate,
1550 .isr = iwl_isr_ict, 373 .isr = iwl_isr_ict,
@@ -1554,44 +377,40 @@ static struct iwl_lib_ops iwl5150_lib = {
1554 .set_ct_kill = iwl5150_set_ct_threshold, 377 .set_ct_kill = iwl5150_set_ct_threshold,
1555 }, 378 },
1556 .add_bcast_station = iwl_add_bcast_station, 379 .add_bcast_station = iwl_add_bcast_station,
380 .recover_from_tx_stall = iwl_bg_monitor_recover,
381 .check_plcp_health = iwl_good_plcp_health,
382 .check_ack_health = iwl_good_ack_health,
1557}; 383};
1558 384
1559static const struct iwl_ops iwl5000_ops = { 385static const struct iwl_ops iwl5000_ops = {
1560 .ucode = &iwl5000_ucode, 386 .ucode = &iwlagn_ucode,
1561 .lib = &iwl5000_lib, 387 .lib = &iwl5000_lib,
1562 .hcmd = &iwl5000_hcmd, 388 .hcmd = &iwlagn_hcmd,
1563 .utils = &iwl5000_hcmd_utils, 389 .utils = &iwlagn_hcmd_utils,
1564 .led = &iwlagn_led_ops, 390 .led = &iwlagn_led_ops,
1565}; 391};
1566 392
1567static const struct iwl_ops iwl5150_ops = { 393static const struct iwl_ops iwl5150_ops = {
1568 .ucode = &iwl5000_ucode, 394 .ucode = &iwlagn_ucode,
1569 .lib = &iwl5150_lib, 395 .lib = &iwl5150_lib,
1570 .hcmd = &iwl5000_hcmd, 396 .hcmd = &iwlagn_hcmd,
1571 .utils = &iwl5000_hcmd_utils, 397 .utils = &iwlagn_hcmd_utils,
1572 .led = &iwlagn_led_ops, 398 .led = &iwlagn_led_ops,
1573}; 399};
1574 400
1575struct iwl_mod_params iwl50_mod_params = {
1576 .amsdu_size_8K = 1,
1577 .restart_fw = 1,
1578 /* the rest are 0 by default */
1579};
1580
1581
1582struct iwl_cfg iwl5300_agn_cfg = { 401struct iwl_cfg iwl5300_agn_cfg = {
1583 .name = "5300AGN", 402 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
1584 .fw_name_pre = IWL5000_FW_PRE, 403 .fw_name_pre = IWL5000_FW_PRE,
1585 .ucode_api_max = IWL5000_UCODE_API_MAX, 404 .ucode_api_max = IWL5000_UCODE_API_MAX,
1586 .ucode_api_min = IWL5000_UCODE_API_MIN, 405 .ucode_api_min = IWL5000_UCODE_API_MIN,
1587 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 406 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1588 .ops = &iwl5000_ops, 407 .ops = &iwl5000_ops,
1589 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 408 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1590 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 409 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1591 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 410 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1592 .num_of_queues = IWL50_NUM_QUEUES, 411 .num_of_queues = IWLAGN_NUM_QUEUES,
1593 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 412 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1594 .mod_params = &iwl50_mod_params, 413 .mod_params = &iwlagn_mod_params,
1595 .valid_tx_ant = ANT_ABC, 414 .valid_tx_ant = ANT_ABC,
1596 .valid_rx_ant = ANT_ABC, 415 .valid_rx_ant = ANT_ABC,
1597 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 416 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1603,21 +422,23 @@ struct iwl_cfg iwl5300_agn_cfg = {
1603 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 422 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1604 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 423 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1605 .chain_noise_scale = 1000, 424 .chain_noise_scale = 1000,
425 .monitor_recover_period = IWL_MONITORING_PERIOD,
426 .max_event_log_size = 512,
1606}; 427};
1607 428
1608struct iwl_cfg iwl5100_bgn_cfg = { 429struct iwl_cfg iwl5100_bgn_cfg = {
1609 .name = "5100BGN", 430 .name = "Intel(R) WiFi Link 5100 BGN",
1610 .fw_name_pre = IWL5000_FW_PRE, 431 .fw_name_pre = IWL5000_FW_PRE,
1611 .ucode_api_max = IWL5000_UCODE_API_MAX, 432 .ucode_api_max = IWL5000_UCODE_API_MAX,
1612 .ucode_api_min = IWL5000_UCODE_API_MIN, 433 .ucode_api_min = IWL5000_UCODE_API_MIN,
1613 .sku = IWL_SKU_G|IWL_SKU_N, 434 .sku = IWL_SKU_G|IWL_SKU_N,
1614 .ops = &iwl5000_ops, 435 .ops = &iwl5000_ops,
1615 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 436 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1616 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 437 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1617 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 438 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1618 .num_of_queues = IWL50_NUM_QUEUES, 439 .num_of_queues = IWLAGN_NUM_QUEUES,
1619 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 440 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1620 .mod_params = &iwl50_mod_params, 441 .mod_params = &iwlagn_mod_params,
1621 .valid_tx_ant = ANT_B, 442 .valid_tx_ant = ANT_B,
1622 .valid_rx_ant = ANT_AB, 443 .valid_rx_ant = ANT_AB,
1623 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 444 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1629,21 +450,23 @@ struct iwl_cfg iwl5100_bgn_cfg = {
1629 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 450 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1630 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 451 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1631 .chain_noise_scale = 1000, 452 .chain_noise_scale = 1000,
453 .monitor_recover_period = IWL_MONITORING_PERIOD,
454 .max_event_log_size = 512,
1632}; 455};
1633 456
1634struct iwl_cfg iwl5100_abg_cfg = { 457struct iwl_cfg iwl5100_abg_cfg = {
1635 .name = "5100ABG", 458 .name = "Intel(R) WiFi Link 5100 ABG",
1636 .fw_name_pre = IWL5000_FW_PRE, 459 .fw_name_pre = IWL5000_FW_PRE,
1637 .ucode_api_max = IWL5000_UCODE_API_MAX, 460 .ucode_api_max = IWL5000_UCODE_API_MAX,
1638 .ucode_api_min = IWL5000_UCODE_API_MIN, 461 .ucode_api_min = IWL5000_UCODE_API_MIN,
1639 .sku = IWL_SKU_A|IWL_SKU_G, 462 .sku = IWL_SKU_A|IWL_SKU_G,
1640 .ops = &iwl5000_ops, 463 .ops = &iwl5000_ops,
1641 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 464 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1642 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 465 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1643 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 466 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1644 .num_of_queues = IWL50_NUM_QUEUES, 467 .num_of_queues = IWLAGN_NUM_QUEUES,
1645 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 468 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1646 .mod_params = &iwl50_mod_params, 469 .mod_params = &iwlagn_mod_params,
1647 .valid_tx_ant = ANT_B, 470 .valid_tx_ant = ANT_B,
1648 .valid_rx_ant = ANT_AB, 471 .valid_rx_ant = ANT_AB,
1649 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 472 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1653,21 +476,23 @@ struct iwl_cfg iwl5100_abg_cfg = {
1653 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 476 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1654 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 477 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1655 .chain_noise_scale = 1000, 478 .chain_noise_scale = 1000,
479 .monitor_recover_period = IWL_MONITORING_PERIOD,
480 .max_event_log_size = 512,
1656}; 481};
1657 482
1658struct iwl_cfg iwl5100_agn_cfg = { 483struct iwl_cfg iwl5100_agn_cfg = {
1659 .name = "5100AGN", 484 .name = "Intel(R) WiFi Link 5100 AGN",
1660 .fw_name_pre = IWL5000_FW_PRE, 485 .fw_name_pre = IWL5000_FW_PRE,
1661 .ucode_api_max = IWL5000_UCODE_API_MAX, 486 .ucode_api_max = IWL5000_UCODE_API_MAX,
1662 .ucode_api_min = IWL5000_UCODE_API_MIN, 487 .ucode_api_min = IWL5000_UCODE_API_MIN,
1663 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 488 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1664 .ops = &iwl5000_ops, 489 .ops = &iwl5000_ops,
1665 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 490 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1666 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 491 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1667 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 492 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1668 .num_of_queues = IWL50_NUM_QUEUES, 493 .num_of_queues = IWLAGN_NUM_QUEUES,
1669 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 494 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1670 .mod_params = &iwl50_mod_params, 495 .mod_params = &iwlagn_mod_params,
1671 .valid_tx_ant = ANT_B, 496 .valid_tx_ant = ANT_B,
1672 .valid_rx_ant = ANT_AB, 497 .valid_rx_ant = ANT_AB,
1673 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 498 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1679,21 +504,23 @@ struct iwl_cfg iwl5100_agn_cfg = {
1679 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 504 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1680 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 505 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1681 .chain_noise_scale = 1000, 506 .chain_noise_scale = 1000,
507 .monitor_recover_period = IWL_MONITORING_PERIOD,
508 .max_event_log_size = 512,
1682}; 509};
1683 510
1684struct iwl_cfg iwl5350_agn_cfg = { 511struct iwl_cfg iwl5350_agn_cfg = {
1685 .name = "5350AGN", 512 .name = "Intel(R) WiMAX/WiFi Link 5350 AGN",
1686 .fw_name_pre = IWL5000_FW_PRE, 513 .fw_name_pre = IWL5000_FW_PRE,
1687 .ucode_api_max = IWL5000_UCODE_API_MAX, 514 .ucode_api_max = IWL5000_UCODE_API_MAX,
1688 .ucode_api_min = IWL5000_UCODE_API_MIN, 515 .ucode_api_min = IWL5000_UCODE_API_MIN,
1689 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 516 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1690 .ops = &iwl5000_ops, 517 .ops = &iwl5000_ops,
1691 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 518 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1692 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 519 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1693 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 520 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1694 .num_of_queues = IWL50_NUM_QUEUES, 521 .num_of_queues = IWLAGN_NUM_QUEUES,
1695 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 522 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1696 .mod_params = &iwl50_mod_params, 523 .mod_params = &iwlagn_mod_params,
1697 .valid_tx_ant = ANT_ABC, 524 .valid_tx_ant = ANT_ABC,
1698 .valid_rx_ant = ANT_ABC, 525 .valid_rx_ant = ANT_ABC,
1699 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 526 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1705,21 +532,23 @@ struct iwl_cfg iwl5350_agn_cfg = {
1705 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 532 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1706 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 533 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1707 .chain_noise_scale = 1000, 534 .chain_noise_scale = 1000,
535 .monitor_recover_period = IWL_MONITORING_PERIOD,
536 .max_event_log_size = 512,
1708}; 537};
1709 538
1710struct iwl_cfg iwl5150_agn_cfg = { 539struct iwl_cfg iwl5150_agn_cfg = {
1711 .name = "5150AGN", 540 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
1712 .fw_name_pre = IWL5150_FW_PRE, 541 .fw_name_pre = IWL5150_FW_PRE,
1713 .ucode_api_max = IWL5150_UCODE_API_MAX, 542 .ucode_api_max = IWL5150_UCODE_API_MAX,
1714 .ucode_api_min = IWL5150_UCODE_API_MIN, 543 .ucode_api_min = IWL5150_UCODE_API_MIN,
1715 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 544 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
1716 .ops = &iwl5150_ops, 545 .ops = &iwl5150_ops,
1717 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 546 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1718 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 547 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1719 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 548 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1720 .num_of_queues = IWL50_NUM_QUEUES, 549 .num_of_queues = IWLAGN_NUM_QUEUES,
1721 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 550 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1722 .mod_params = &iwl50_mod_params, 551 .mod_params = &iwlagn_mod_params,
1723 .valid_tx_ant = ANT_A, 552 .valid_tx_ant = ANT_A,
1724 .valid_rx_ant = ANT_AB, 553 .valid_rx_ant = ANT_AB,
1725 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 554 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1731,21 +560,23 @@ struct iwl_cfg iwl5150_agn_cfg = {
1731 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 560 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1732 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 561 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1733 .chain_noise_scale = 1000, 562 .chain_noise_scale = 1000,
563 .monitor_recover_period = IWL_MONITORING_PERIOD,
564 .max_event_log_size = 512,
1734}; 565};
1735 566
1736struct iwl_cfg iwl5150_abg_cfg = { 567struct iwl_cfg iwl5150_abg_cfg = {
1737 .name = "5150ABG", 568 .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
1738 .fw_name_pre = IWL5150_FW_PRE, 569 .fw_name_pre = IWL5150_FW_PRE,
1739 .ucode_api_max = IWL5150_UCODE_API_MAX, 570 .ucode_api_max = IWL5150_UCODE_API_MAX,
1740 .ucode_api_min = IWL5150_UCODE_API_MIN, 571 .ucode_api_min = IWL5150_UCODE_API_MIN,
1741 .sku = IWL_SKU_A|IWL_SKU_G, 572 .sku = IWL_SKU_A|IWL_SKU_G,
1742 .ops = &iwl5150_ops, 573 .ops = &iwl5150_ops,
1743 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 574 .eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
1744 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 575 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1745 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 576 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1746 .num_of_queues = IWL50_NUM_QUEUES, 577 .num_of_queues = IWLAGN_NUM_QUEUES,
1747 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 578 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
1748 .mod_params = &iwl50_mod_params, 579 .mod_params = &iwlagn_mod_params,
1749 .valid_tx_ant = ANT_A, 580 .valid_tx_ant = ANT_A,
1750 .valid_rx_ant = ANT_AB, 581 .valid_rx_ant = ANT_AB,
1751 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL, 582 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
@@ -1755,20 +586,9 @@ struct iwl_cfg iwl5150_abg_cfg = {
1755 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 586 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1756 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 587 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
1757 .chain_noise_scale = 1000, 588 .chain_noise_scale = 1000,
589 .monitor_recover_period = IWL_MONITORING_PERIOD,
590 .max_event_log_size = 512,
1758}; 591};
1759 592
1760MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 593MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
1761MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); 594MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
1762
1763module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, S_IRUGO);
1764MODULE_PARM_DESC(swcrypto50,
1765 "using software crypto engine (default 0 [hardware])\n");
1766module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, S_IRUGO);
1767MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
1768module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, S_IRUGO);
1769MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
1770module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K,
1771 int, S_IRUGO);
1772MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
1773module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, S_IRUGO);
1774MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 92b3e64fc14d..3e32693d1c2f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -42,8 +42,9 @@
42#include "iwl-core.h" 42#include "iwl-core.h"
43#include "iwl-io.h" 43#include "iwl-io.h"
44#include "iwl-sta.h" 44#include "iwl-sta.h"
45#include "iwl-agn.h"
45#include "iwl-helpers.h" 46#include "iwl-helpers.h"
46#include "iwl-5000-hw.h" 47#include "iwl-agn-hw.h"
47#include "iwl-6000-hw.h" 48#include "iwl-6000-hw.h"
48#include "iwl-agn-led.h" 49#include "iwl-agn-led.h"
49 50
@@ -56,6 +57,7 @@
56#define IWL6050_UCODE_API_MIN 4 57#define IWL6050_UCODE_API_MIN 4
57 58
58#define IWL6000_FW_PRE "iwlwifi-6000-" 59#define IWL6000_FW_PRE "iwlwifi-6000-"
60#define IWL6000_G2_FW_PRE "iwlwifi-6005-"
59#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" 61#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
60#define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api) 62#define IWL6000_MODULE_FIRMWARE(api) _IWL6000_MODULE_FIRMWARE(api)
61 63
@@ -136,7 +138,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
136static int iwl6000_hw_set_hw_params(struct iwl_priv *priv) 138static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
137{ 139{
138 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES && 140 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
139 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES) 141 priv->cfg->mod_params->num_of_queues <= IWLAGN_NUM_QUEUES)
140 priv->cfg->num_of_queues = 142 priv->cfg->num_of_queues =
141 priv->cfg->mod_params->num_of_queues; 143 priv->cfg->mod_params->num_of_queues;
142 144
@@ -144,7 +146,7 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
144 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 146 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
145 priv->hw_params.scd_bc_tbls_size = 147 priv->hw_params.scd_bc_tbls_size =
146 priv->cfg->num_of_queues * 148 priv->cfg->num_of_queues *
147 sizeof(struct iwl5000_scd_bc_tbl); 149 sizeof(struct iwlagn_scd_bc_tbl);
148 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 150 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
149 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 151 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
150 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 152 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
@@ -225,25 +227,25 @@ static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
225 227
226static struct iwl_lib_ops iwl6000_lib = { 228static struct iwl_lib_ops iwl6000_lib = {
227 .set_hw_params = iwl6000_hw_set_hw_params, 229 .set_hw_params = iwl6000_hw_set_hw_params,
228 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 230 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
229 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 231 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
230 .txq_set_sched = iwl5000_txq_set_sched, 232 .txq_set_sched = iwlagn_txq_set_sched,
231 .txq_agg_enable = iwl5000_txq_agg_enable, 233 .txq_agg_enable = iwlagn_txq_agg_enable,
232 .txq_agg_disable = iwl5000_txq_agg_disable, 234 .txq_agg_disable = iwlagn_txq_agg_disable,
233 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 235 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
234 .txq_free_tfd = iwl_hw_txq_free_tfd, 236 .txq_free_tfd = iwl_hw_txq_free_tfd,
235 .txq_init = iwl_hw_tx_queue_init, 237 .txq_init = iwl_hw_tx_queue_init,
236 .rx_handler_setup = iwl5000_rx_handler_setup, 238 .rx_handler_setup = iwlagn_rx_handler_setup,
237 .setup_deferred_work = iwl5000_setup_deferred_work, 239 .setup_deferred_work = iwlagn_setup_deferred_work,
238 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 240 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
239 .load_ucode = iwl5000_load_ucode, 241 .load_ucode = iwlagn_load_ucode,
240 .dump_nic_event_log = iwl_dump_nic_event_log, 242 .dump_nic_event_log = iwl_dump_nic_event_log,
241 .dump_nic_error_log = iwl_dump_nic_error_log, 243 .dump_nic_error_log = iwl_dump_nic_error_log,
242 .dump_csr = iwl_dump_csr, 244 .dump_csr = iwl_dump_csr,
243 .dump_fh = iwl_dump_fh, 245 .dump_fh = iwl_dump_fh,
244 .init_alive_start = iwl5000_init_alive_start, 246 .init_alive_start = iwlagn_init_alive_start,
245 .alive_notify = iwl5000_alive_notify, 247 .alive_notify = iwlagn_alive_notify,
246 .send_tx_power = iwl5000_send_tx_power, 248 .send_tx_power = iwlagn_send_tx_power,
247 .update_chain_flags = iwl_update_chain_flags, 249 .update_chain_flags = iwl_update_chain_flags,
248 .set_channel_switch = iwl6000_hw_channel_switch, 250 .set_channel_switch = iwl6000_hw_channel_switch,
249 .apm_ops = { 251 .apm_ops = {
@@ -254,60 +256,64 @@ static struct iwl_lib_ops iwl6000_lib = {
254 }, 256 },
255 .eeprom_ops = { 257 .eeprom_ops = {
256 .regulatory_bands = { 258 .regulatory_bands = {
257 EEPROM_5000_REG_BAND_1_CHANNELS, 259 EEPROM_REG_BAND_1_CHANNELS,
258 EEPROM_5000_REG_BAND_2_CHANNELS, 260 EEPROM_REG_BAND_2_CHANNELS,
259 EEPROM_5000_REG_BAND_3_CHANNELS, 261 EEPROM_REG_BAND_3_CHANNELS,
260 EEPROM_5000_REG_BAND_4_CHANNELS, 262 EEPROM_REG_BAND_4_CHANNELS,
261 EEPROM_5000_REG_BAND_5_CHANNELS, 263 EEPROM_REG_BAND_5_CHANNELS,
264 EEPROM_REG_BAND_24_HT40_CHANNELS,
262 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 265 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
263 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 266 EEPROM_REG_BAND_52_HT40_CHANNELS
264 }, 267 },
265 .verify_signature = iwlcore_eeprom_verify_signature, 268 .verify_signature = iwlcore_eeprom_verify_signature,
266 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 269 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
267 .release_semaphore = iwlcore_eeprom_release_semaphore, 270 .release_semaphore = iwlcore_eeprom_release_semaphore,
268 .calib_version = iwl5000_eeprom_calib_version, 271 .calib_version = iwlagn_eeprom_calib_version,
269 .query_addr = iwl5000_eeprom_query_addr, 272 .query_addr = iwlagn_eeprom_query_addr,
270 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 273 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
271 }, 274 },
272 .post_associate = iwl_post_associate, 275 .post_associate = iwl_post_associate,
273 .isr = iwl_isr_ict, 276 .isr = iwl_isr_ict,
274 .config_ap = iwl_config_ap, 277 .config_ap = iwl_config_ap,
275 .temp_ops = { 278 .temp_ops = {
276 .temperature = iwl5000_temperature, 279 .temperature = iwlagn_temperature,
277 .set_ct_kill = iwl6000_set_ct_threshold, 280 .set_ct_kill = iwl6000_set_ct_threshold,
278 }, 281 },
279 .add_bcast_station = iwl_add_bcast_station, 282 .add_bcast_station = iwl_add_bcast_station,
283 .recover_from_tx_stall = iwl_bg_monitor_recover,
284 .check_plcp_health = iwl_good_plcp_health,
285 .check_ack_health = iwl_good_ack_health,
280}; 286};
281 287
282static const struct iwl_ops iwl6000_ops = { 288static const struct iwl_ops iwl6000_ops = {
283 .ucode = &iwl5000_ucode, 289 .ucode = &iwlagn_ucode,
284 .lib = &iwl6000_lib, 290 .lib = &iwl6000_lib,
285 .hcmd = &iwl5000_hcmd, 291 .hcmd = &iwlagn_hcmd,
286 .utils = &iwl5000_hcmd_utils, 292 .utils = &iwlagn_hcmd_utils,
287 .led = &iwlagn_led_ops, 293 .led = &iwlagn_led_ops,
288}; 294};
289 295
290static struct iwl_lib_ops iwl6050_lib = { 296static struct iwl_lib_ops iwl6050_lib = {
291 .set_hw_params = iwl6000_hw_set_hw_params, 297 .set_hw_params = iwl6000_hw_set_hw_params,
292 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 298 .txq_update_byte_cnt_tbl = iwlagn_txq_update_byte_cnt_tbl,
293 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 299 .txq_inval_byte_cnt_tbl = iwlagn_txq_inval_byte_cnt_tbl,
294 .txq_set_sched = iwl5000_txq_set_sched, 300 .txq_set_sched = iwlagn_txq_set_sched,
295 .txq_agg_enable = iwl5000_txq_agg_enable, 301 .txq_agg_enable = iwlagn_txq_agg_enable,
296 .txq_agg_disable = iwl5000_txq_agg_disable, 302 .txq_agg_disable = iwlagn_txq_agg_disable,
297 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd, 303 .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
298 .txq_free_tfd = iwl_hw_txq_free_tfd, 304 .txq_free_tfd = iwl_hw_txq_free_tfd,
299 .txq_init = iwl_hw_tx_queue_init, 305 .txq_init = iwl_hw_tx_queue_init,
300 .rx_handler_setup = iwl5000_rx_handler_setup, 306 .rx_handler_setup = iwlagn_rx_handler_setup,
301 .setup_deferred_work = iwl5000_setup_deferred_work, 307 .setup_deferred_work = iwlagn_setup_deferred_work,
302 .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr, 308 .is_valid_rtc_data_addr = iwlagn_hw_valid_rtc_data_addr,
303 .load_ucode = iwl5000_load_ucode, 309 .load_ucode = iwlagn_load_ucode,
304 .dump_nic_event_log = iwl_dump_nic_event_log, 310 .dump_nic_event_log = iwl_dump_nic_event_log,
305 .dump_nic_error_log = iwl_dump_nic_error_log, 311 .dump_nic_error_log = iwl_dump_nic_error_log,
306 .dump_csr = iwl_dump_csr, 312 .dump_csr = iwl_dump_csr,
307 .dump_fh = iwl_dump_fh, 313 .dump_fh = iwl_dump_fh,
308 .init_alive_start = iwl5000_init_alive_start, 314 .init_alive_start = iwlagn_init_alive_start,
309 .alive_notify = iwl5000_alive_notify, 315 .alive_notify = iwlagn_alive_notify,
310 .send_tx_power = iwl5000_send_tx_power, 316 .send_tx_power = iwlagn_send_tx_power,
311 .update_chain_flags = iwl_update_chain_flags, 317 .update_chain_flags = iwl_update_chain_flags,
312 .set_channel_switch = iwl6000_hw_channel_switch, 318 .set_channel_switch = iwl6000_hw_channel_switch,
313 .apm_ops = { 319 .apm_ops = {
@@ -318,45 +324,82 @@ static struct iwl_lib_ops iwl6050_lib = {
318 }, 324 },
319 .eeprom_ops = { 325 .eeprom_ops = {
320 .regulatory_bands = { 326 .regulatory_bands = {
321 EEPROM_5000_REG_BAND_1_CHANNELS, 327 EEPROM_REG_BAND_1_CHANNELS,
322 EEPROM_5000_REG_BAND_2_CHANNELS, 328 EEPROM_REG_BAND_2_CHANNELS,
323 EEPROM_5000_REG_BAND_3_CHANNELS, 329 EEPROM_REG_BAND_3_CHANNELS,
324 EEPROM_5000_REG_BAND_4_CHANNELS, 330 EEPROM_REG_BAND_4_CHANNELS,
325 EEPROM_5000_REG_BAND_5_CHANNELS, 331 EEPROM_REG_BAND_5_CHANNELS,
326 EEPROM_6000_REG_BAND_24_HT40_CHANNELS, 332 EEPROM_6000_REG_BAND_24_HT40_CHANNELS,
327 EEPROM_5000_REG_BAND_52_HT40_CHANNELS 333 EEPROM_REG_BAND_52_HT40_CHANNELS
328 }, 334 },
329 .verify_signature = iwlcore_eeprom_verify_signature, 335 .verify_signature = iwlcore_eeprom_verify_signature,
330 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore, 336 .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
331 .release_semaphore = iwlcore_eeprom_release_semaphore, 337 .release_semaphore = iwlcore_eeprom_release_semaphore,
332 .calib_version = iwl5000_eeprom_calib_version, 338 .calib_version = iwlagn_eeprom_calib_version,
333 .query_addr = iwl5000_eeprom_query_addr, 339 .query_addr = iwlagn_eeprom_query_addr,
334 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 340 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
335 }, 341 },
336 .post_associate = iwl_post_associate, 342 .post_associate = iwl_post_associate,
337 .isr = iwl_isr_ict, 343 .isr = iwl_isr_ict,
338 .config_ap = iwl_config_ap, 344 .config_ap = iwl_config_ap,
339 .temp_ops = { 345 .temp_ops = {
340 .temperature = iwl5000_temperature, 346 .temperature = iwlagn_temperature,
341 .set_ct_kill = iwl6000_set_ct_threshold, 347 .set_ct_kill = iwl6000_set_ct_threshold,
342 .set_calib_version = iwl6050_set_calib_version, 348 .set_calib_version = iwl6050_set_calib_version,
343 }, 349 },
344 .add_bcast_station = iwl_add_bcast_station, 350 .add_bcast_station = iwl_add_bcast_station,
351 .recover_from_tx_stall = iwl_bg_monitor_recover,
352 .check_plcp_health = iwl_good_plcp_health,
353 .check_ack_health = iwl_good_ack_health,
345}; 354};
346 355
347static const struct iwl_ops iwl6050_ops = { 356static const struct iwl_ops iwl6050_ops = {
348 .ucode = &iwl5000_ucode, 357 .ucode = &iwlagn_ucode,
349 .lib = &iwl6050_lib, 358 .lib = &iwl6050_lib,
350 .hcmd = &iwl5000_hcmd, 359 .hcmd = &iwlagn_hcmd,
351 .utils = &iwl5000_hcmd_utils, 360 .utils = &iwlagn_hcmd_utils,
352 .led = &iwlagn_led_ops, 361 .led = &iwlagn_led_ops,
353}; 362};
354 363
355/* 364/*
356 * "i": Internal configuration, use internal Power Amplifier 365 * "i": Internal configuration, use internal Power Amplifier
357 */ 366 */
367struct iwl_cfg iwl6000i_g2_2agn_cfg = {
368 .name = "6000 Series 2x2 AGN Gen2",
369 .fw_name_pre = IWL6000_G2_FW_PRE,
370 .ucode_api_max = IWL6000_UCODE_API_MAX,
371 .ucode_api_min = IWL6000_UCODE_API_MIN,
372 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
373 .ops = &iwl6000_ops,
374 .eeprom_size = OTP_LOW_IMAGE_SIZE,
375 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
376 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
377 .num_of_queues = IWLAGN_NUM_QUEUES,
378 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
379 .mod_params = &iwlagn_mod_params,
380 .valid_tx_ant = ANT_AB,
381 .valid_rx_ant = ANT_AB,
382 .pll_cfg_val = 0,
383 .set_l0s = true,
384 .use_bsm = false,
385 .pa_type = IWL_PA_INTERNAL,
386 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
387 .shadow_ram_support = true,
388 .ht_greenfield_support = true,
389 .led_compensation = 51,
390 .use_rts_for_ht = true, /* use rts/cts protection */
391 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
392 .supports_idle = true,
393 .adv_thermal_throttle = true,
394 .support_ct_kill_exit = true,
395 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
396 .chain_noise_scale = 1000,
397 .monitor_recover_period = IWL_MONITORING_PERIOD,
398 .max_event_log_size = 1024,
399};
400
358struct iwl_cfg iwl6000i_2agn_cfg = { 401struct iwl_cfg iwl6000i_2agn_cfg = {
359 .name = "6000 Series 2x2 AGN", 402 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
360 .fw_name_pre = IWL6000_FW_PRE, 403 .fw_name_pre = IWL6000_FW_PRE,
361 .ucode_api_max = IWL6000_UCODE_API_MAX, 404 .ucode_api_max = IWL6000_UCODE_API_MAX,
362 .ucode_api_min = IWL6000_UCODE_API_MIN, 405 .ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -364,10 +407,10 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
364 .ops = &iwl6000_ops, 407 .ops = &iwl6000_ops,
365 .eeprom_size = OTP_LOW_IMAGE_SIZE, 408 .eeprom_size = OTP_LOW_IMAGE_SIZE,
366 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 409 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
367 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 410 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
368 .num_of_queues = IWL50_NUM_QUEUES, 411 .num_of_queues = IWLAGN_NUM_QUEUES,
369 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 412 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
370 .mod_params = &iwl50_mod_params, 413 .mod_params = &iwlagn_mod_params,
371 .valid_tx_ant = ANT_BC, 414 .valid_tx_ant = ANT_BC,
372 .valid_rx_ant = ANT_BC, 415 .valid_rx_ant = ANT_BC,
373 .pll_cfg_val = 0, 416 .pll_cfg_val = 0,
@@ -385,10 +428,12 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
385 .support_ct_kill_exit = true, 428 .support_ct_kill_exit = true,
386 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 429 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
387 .chain_noise_scale = 1000, 430 .chain_noise_scale = 1000,
431 .monitor_recover_period = IWL_MONITORING_PERIOD,
432 .max_event_log_size = 1024,
388}; 433};
389 434
390struct iwl_cfg iwl6000i_2abg_cfg = { 435struct iwl_cfg iwl6000i_2abg_cfg = {
391 .name = "6000 Series 2x2 ABG", 436 .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
392 .fw_name_pre = IWL6000_FW_PRE, 437 .fw_name_pre = IWL6000_FW_PRE,
393 .ucode_api_max = IWL6000_UCODE_API_MAX, 438 .ucode_api_max = IWL6000_UCODE_API_MAX,
394 .ucode_api_min = IWL6000_UCODE_API_MIN, 439 .ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -396,10 +441,10 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
396 .ops = &iwl6000_ops, 441 .ops = &iwl6000_ops,
397 .eeprom_size = OTP_LOW_IMAGE_SIZE, 442 .eeprom_size = OTP_LOW_IMAGE_SIZE,
398 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 443 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
399 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 444 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
400 .num_of_queues = IWL50_NUM_QUEUES, 445 .num_of_queues = IWLAGN_NUM_QUEUES,
401 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 446 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
402 .mod_params = &iwl50_mod_params, 447 .mod_params = &iwlagn_mod_params,
403 .valid_tx_ant = ANT_BC, 448 .valid_tx_ant = ANT_BC,
404 .valid_rx_ant = ANT_BC, 449 .valid_rx_ant = ANT_BC,
405 .pll_cfg_val = 0, 450 .pll_cfg_val = 0,
@@ -416,10 +461,12 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
416 .support_ct_kill_exit = true, 461 .support_ct_kill_exit = true,
417 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 462 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
418 .chain_noise_scale = 1000, 463 .chain_noise_scale = 1000,
464 .monitor_recover_period = IWL_MONITORING_PERIOD,
465 .max_event_log_size = 1024,
419}; 466};
420 467
421struct iwl_cfg iwl6000i_2bg_cfg = { 468struct iwl_cfg iwl6000i_2bg_cfg = {
422 .name = "6000 Series 2x2 BG", 469 .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
423 .fw_name_pre = IWL6000_FW_PRE, 470 .fw_name_pre = IWL6000_FW_PRE,
424 .ucode_api_max = IWL6000_UCODE_API_MAX, 471 .ucode_api_max = IWL6000_UCODE_API_MAX,
425 .ucode_api_min = IWL6000_UCODE_API_MIN, 472 .ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -427,10 +474,10 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
427 .ops = &iwl6000_ops, 474 .ops = &iwl6000_ops,
428 .eeprom_size = OTP_LOW_IMAGE_SIZE, 475 .eeprom_size = OTP_LOW_IMAGE_SIZE,
429 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 476 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
430 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 477 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
431 .num_of_queues = IWL50_NUM_QUEUES, 478 .num_of_queues = IWLAGN_NUM_QUEUES,
432 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 479 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
433 .mod_params = &iwl50_mod_params, 480 .mod_params = &iwlagn_mod_params,
434 .valid_tx_ant = ANT_BC, 481 .valid_tx_ant = ANT_BC,
435 .valid_rx_ant = ANT_BC, 482 .valid_rx_ant = ANT_BC,
436 .pll_cfg_val = 0, 483 .pll_cfg_val = 0,
@@ -447,10 +494,12 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
447 .support_ct_kill_exit = true, 494 .support_ct_kill_exit = true,
448 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 495 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
449 .chain_noise_scale = 1000, 496 .chain_noise_scale = 1000,
497 .monitor_recover_period = IWL_MONITORING_PERIOD,
498 .max_event_log_size = 1024,
450}; 499};
451 500
452struct iwl_cfg iwl6050_2agn_cfg = { 501struct iwl_cfg iwl6050_2agn_cfg = {
453 .name = "6050 Series 2x2 AGN", 502 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
454 .fw_name_pre = IWL6050_FW_PRE, 503 .fw_name_pre = IWL6050_FW_PRE,
455 .ucode_api_max = IWL6050_UCODE_API_MAX, 504 .ucode_api_max = IWL6050_UCODE_API_MAX,
456 .ucode_api_min = IWL6050_UCODE_API_MIN, 505 .ucode_api_min = IWL6050_UCODE_API_MIN,
@@ -458,10 +507,10 @@ struct iwl_cfg iwl6050_2agn_cfg = {
458 .ops = &iwl6050_ops, 507 .ops = &iwl6050_ops,
459 .eeprom_size = OTP_LOW_IMAGE_SIZE, 508 .eeprom_size = OTP_LOW_IMAGE_SIZE,
460 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, 509 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
461 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 510 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
462 .num_of_queues = IWL50_NUM_QUEUES, 511 .num_of_queues = IWLAGN_NUM_QUEUES,
463 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 512 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
464 .mod_params = &iwl50_mod_params, 513 .mod_params = &iwlagn_mod_params,
465 .valid_tx_ant = ANT_AB, 514 .valid_tx_ant = ANT_AB,
466 .valid_rx_ant = ANT_AB, 515 .valid_rx_ant = ANT_AB,
467 .pll_cfg_val = 0, 516 .pll_cfg_val = 0,
@@ -479,10 +528,12 @@ struct iwl_cfg iwl6050_2agn_cfg = {
479 .support_ct_kill_exit = true, 528 .support_ct_kill_exit = true,
480 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 529 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
481 .chain_noise_scale = 1500, 530 .chain_noise_scale = 1500,
531 .monitor_recover_period = IWL_MONITORING_PERIOD,
532 .max_event_log_size = 1024,
482}; 533};
483 534
484struct iwl_cfg iwl6050_2abg_cfg = { 535struct iwl_cfg iwl6050_2abg_cfg = {
485 .name = "6050 Series 2x2 ABG", 536 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
486 .fw_name_pre = IWL6050_FW_PRE, 537 .fw_name_pre = IWL6050_FW_PRE,
487 .ucode_api_max = IWL6050_UCODE_API_MAX, 538 .ucode_api_max = IWL6050_UCODE_API_MAX,
488 .ucode_api_min = IWL6050_UCODE_API_MIN, 539 .ucode_api_min = IWL6050_UCODE_API_MIN,
@@ -490,10 +541,10 @@ struct iwl_cfg iwl6050_2abg_cfg = {
490 .ops = &iwl6050_ops, 541 .ops = &iwl6050_ops,
491 .eeprom_size = OTP_LOW_IMAGE_SIZE, 542 .eeprom_size = OTP_LOW_IMAGE_SIZE,
492 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, 543 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
493 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 544 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
494 .num_of_queues = IWL50_NUM_QUEUES, 545 .num_of_queues = IWLAGN_NUM_QUEUES,
495 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 546 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
496 .mod_params = &iwl50_mod_params, 547 .mod_params = &iwlagn_mod_params,
497 .valid_tx_ant = ANT_AB, 548 .valid_tx_ant = ANT_AB,
498 .valid_rx_ant = ANT_AB, 549 .valid_rx_ant = ANT_AB,
499 .pll_cfg_val = 0, 550 .pll_cfg_val = 0,
@@ -510,10 +561,12 @@ struct iwl_cfg iwl6050_2abg_cfg = {
510 .support_ct_kill_exit = true, 561 .support_ct_kill_exit = true,
511 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 562 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
512 .chain_noise_scale = 1500, 563 .chain_noise_scale = 1500,
564 .monitor_recover_period = IWL_MONITORING_PERIOD,
565 .max_event_log_size = 1024,
513}; 566};
514 567
515struct iwl_cfg iwl6000_3agn_cfg = { 568struct iwl_cfg iwl6000_3agn_cfg = {
516 .name = "6000 Series 3x3 AGN", 569 .name = "Intel(R) Centrino(R) Ultimate-N 6300 AGN",
517 .fw_name_pre = IWL6000_FW_PRE, 570 .fw_name_pre = IWL6000_FW_PRE,
518 .ucode_api_max = IWL6000_UCODE_API_MAX, 571 .ucode_api_max = IWL6000_UCODE_API_MAX,
519 .ucode_api_min = IWL6000_UCODE_API_MIN, 572 .ucode_api_min = IWL6000_UCODE_API_MIN,
@@ -521,10 +574,10 @@ struct iwl_cfg iwl6000_3agn_cfg = {
521 .ops = &iwl6000_ops, 574 .ops = &iwl6000_ops,
522 .eeprom_size = OTP_LOW_IMAGE_SIZE, 575 .eeprom_size = OTP_LOW_IMAGE_SIZE,
523 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 576 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
524 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 577 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
525 .num_of_queues = IWL50_NUM_QUEUES, 578 .num_of_queues = IWLAGN_NUM_QUEUES,
526 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES, 579 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
527 .mod_params = &iwl50_mod_params, 580 .mod_params = &iwlagn_mod_params,
528 .valid_tx_ant = ANT_ABC, 581 .valid_tx_ant = ANT_ABC,
529 .valid_rx_ant = ANT_ABC, 582 .valid_rx_ant = ANT_ABC,
530 .pll_cfg_val = 0, 583 .pll_cfg_val = 0,
@@ -542,6 +595,8 @@ struct iwl_cfg iwl6000_3agn_cfg = {
542 .support_ct_kill_exit = true, 595 .support_ct_kill_exit = true,
543 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 596 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
544 .chain_noise_scale = 1000, 597 .chain_noise_scale = 1000,
598 .monitor_recover_period = IWL_MONITORING_PERIOD,
599 .max_event_log_size = 1024,
545}; 600};
546 601
547MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 602MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
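Editor's aside (not part of the patch): each iwl_cfg above only takes effect once it is bound to PCI IDs in the driver's device table in iwl-agn.c. A minimal sketch of that binding, with placeholder device/subdevice IDs, could look like:

static DEFINE_PCI_DEVICE_TABLE(example_card_ids) = {
	/* IWL_PCI_DEVICE() supplies the Intel vendor ID; the IDs
	 * below are placeholders for illustration only */
	{IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
	{IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
	{0}
};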
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
new file mode 100644
index 000000000000..28bc8f8ba981
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -0,0 +1,274 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-agn.h"
39
40static int iwlagn_send_rxon_assoc(struct iwl_priv *priv)
41{
42 int ret = 0;
43 struct iwl5000_rxon_assoc_cmd rxon_assoc;
44 const struct iwl_rxon_cmd *rxon1 = &priv->staging_rxon;
45 const struct iwl_rxon_cmd *rxon2 = &priv->active_rxon;
46
47 if ((rxon1->flags == rxon2->flags) &&
48 (rxon1->filter_flags == rxon2->filter_flags) &&
49 (rxon1->cck_basic_rates == rxon2->cck_basic_rates) &&
50 (rxon1->ofdm_ht_single_stream_basic_rates ==
51 rxon2->ofdm_ht_single_stream_basic_rates) &&
52 (rxon1->ofdm_ht_dual_stream_basic_rates ==
53 rxon2->ofdm_ht_dual_stream_basic_rates) &&
54 (rxon1->ofdm_ht_triple_stream_basic_rates ==
55 rxon2->ofdm_ht_triple_stream_basic_rates) &&
56 (rxon1->acquisition_data == rxon2->acquisition_data) &&
57 (rxon1->rx_chain == rxon2->rx_chain) &&
58 (rxon1->ofdm_basic_rates == rxon2->ofdm_basic_rates)) {
59 IWL_DEBUG_INFO(priv, "Using current RXON_ASSOC. Not resending.\n");
60 return 0;
61 }
62
63 rxon_assoc.flags = priv->staging_rxon.flags;
64 rxon_assoc.filter_flags = priv->staging_rxon.filter_flags;
65 rxon_assoc.ofdm_basic_rates = priv->staging_rxon.ofdm_basic_rates;
66 rxon_assoc.cck_basic_rates = priv->staging_rxon.cck_basic_rates;
67 rxon_assoc.reserved1 = 0;
68 rxon_assoc.reserved2 = 0;
69 rxon_assoc.reserved3 = 0;
70 rxon_assoc.ofdm_ht_single_stream_basic_rates =
71 priv->staging_rxon.ofdm_ht_single_stream_basic_rates;
72 rxon_assoc.ofdm_ht_dual_stream_basic_rates =
73 priv->staging_rxon.ofdm_ht_dual_stream_basic_rates;
74 rxon_assoc.rx_chain_select_flags = priv->staging_rxon.rx_chain;
75 rxon_assoc.ofdm_ht_triple_stream_basic_rates =
76 priv->staging_rxon.ofdm_ht_triple_stream_basic_rates;
77 rxon_assoc.acquisition_data = priv->staging_rxon.acquisition_data;
78
79 ret = iwl_send_cmd_pdu_async(priv, REPLY_RXON_ASSOC,
80 sizeof(rxon_assoc), &rxon_assoc, NULL);
81 if (ret)
82 return ret;
83
84 return ret;
85}
86
87static int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
88{
89 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
90 .valid = cpu_to_le32(valid_tx_ant),
91 };
92
93 if (IWL_UCODE_API(priv->ucode_ver) > 1) {
94 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
95 return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
96 sizeof(struct iwl_tx_ant_config_cmd),
97 &tx_ant_cmd);
98 } else {
99 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
100 return -EOPNOTSUPP;
101 }
102}
103
104/* Currently this is the superset of everything */
105static u16 iwlagn_get_hcmd_size(u8 cmd_id, u16 len)
106{
107 return len;
108}
109
110static u16 iwlagn_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd, u8 *data)
111{
112 u16 size = (u16)sizeof(struct iwl_addsta_cmd);
113 struct iwl_addsta_cmd *addsta = (struct iwl_addsta_cmd *)data;
114 memcpy(addsta, cmd, size);
115	/* reserved in 5000 */
116 addsta->rate_n_flags = cpu_to_le16(0);
117 return size;
118}
119
120static void iwlagn_gain_computation(struct iwl_priv *priv,
121 u32 average_noise[NUM_RX_CHAINS],
122 u16 min_average_noise_antenna_i,
123 u32 min_average_noise,
124 u8 default_chain)
125{
126 int i;
127 s32 delta_g;
128 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
129
130 /*
131 * Find Gain Code for the chains based on "default chain"
132 */
133 for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
134 if ((data->disconn_array[i])) {
135 data->delta_gain_code[i] = 0;
136 continue;
137 }
138
139 delta_g = (priv->cfg->chain_noise_scale *
140 ((s32)average_noise[default_chain] -
141 (s32)average_noise[i])) / 1500;
142
143 /* bound gain by 2 bits value max, 3rd bit is sign */
144 data->delta_gain_code[i] =
145 min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
146
147 if (delta_g < 0)
148 /*
149 * set negative sign ...
150 * note to Intel developers: This is uCode API format,
151 * not the format of any internal device registers.
152 * Do not change this format for e.g. 6050 or similar
153 * devices. Change format only if more resolution
154 * (i.e. more than 2 bits magnitude) is needed.
155 */
156 data->delta_gain_code[i] |= (1 << 2);
157 }
158
159 IWL_DEBUG_CALIB(priv, "Delta gains: ANT_B = %d ANT_C = %d\n",
160 data->delta_gain_code[1], data->delta_gain_code[2]);
161
162 if (!data->radio_write) {
163 struct iwl_calib_chain_noise_gain_cmd cmd;
164
165 memset(&cmd, 0, sizeof(cmd));
166
167 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_GAIN_CMD;
168 cmd.hdr.first_group = 0;
169 cmd.hdr.groups_num = 1;
170 cmd.hdr.data_valid = 1;
171 cmd.delta_gain_1 = data->delta_gain_code[1];
172 cmd.delta_gain_2 = data->delta_gain_code[2];
173 iwl_send_cmd_pdu_async(priv, REPLY_PHY_CALIBRATION_CMD,
174 sizeof(cmd), &cmd, NULL);
175
176 data->radio_write = 1;
177 data->state = IWL_CHAIN_NOISE_CALIBRATED;
178 }
179
180 data->chain_noise_a = 0;
181 data->chain_noise_b = 0;
182 data->chain_noise_c = 0;
183 data->chain_signal_a = 0;
184 data->chain_signal_b = 0;
185 data->chain_signal_c = 0;
186 data->beacon_count = 0;
187}
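/*
 * Editor's sketch (not part of the patch): the delta-gain encoding
 * above, in isolation.  Assumes CHAIN_NOISE_MAX_DELTA_GAIN_CODE == 3
 * (2-bit magnitude) and bit 2 as the sign bit, per the uCode API
 * comment.  Worked example with chain_noise_scale = 1000:
 *   noise_ref = 120, noise_i = 117: delta_g = 3000/1500 =  2 -> 0b010
 *   noise_ref = 120, noise_i = 126: delta_g = -6000/1500 = -4,
 *                                   clamped to 3, sign set -> 0b111
 */
static u8 example_delta_gain_code(s32 scale, s32 noise_ref, s32 noise_i)
{
	s32 delta_g = scale * (noise_ref - noise_i) / 1500;
	u8 code = min_t(s32, abs(delta_g), 3);	/* 2-bit magnitude */

	if (delta_g < 0)
		code |= (1 << 2);		/* 3rd bit is sign */
	return code;
}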
188
189static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
190{
191 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
192 int ret;
193
194 if ((data->state == IWL_CHAIN_NOISE_ALIVE) && iwl_is_associated(priv)) {
195 struct iwl_calib_chain_noise_reset_cmd cmd;
196 memset(&cmd, 0, sizeof(cmd));
197
198 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CHAIN_NOISE_RESET_CMD;
199 cmd.hdr.first_group = 0;
200 cmd.hdr.groups_num = 1;
201 cmd.hdr.data_valid = 1;
202 ret = iwl_send_cmd_pdu(priv, REPLY_PHY_CALIBRATION_CMD,
203 sizeof(cmd), &cmd);
204 if (ret)
205 IWL_ERR(priv,
206 "Could not send REPLY_PHY_CALIBRATION_CMD\n");
207 data->state = IWL_CHAIN_NOISE_ACCUMULATE;
208 IWL_DEBUG_CALIB(priv, "Run chain_noise_calibrate\n");
209 }
210}
211
212static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
213 __le32 *tx_flags)
214{
215 if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
216 (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
217 *tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
218 else
219 *tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
220}
221
222/* Calc max signal level (dBm) among 3 possible receivers */
223static int iwlagn_calc_rssi(struct iwl_priv *priv,
224 struct iwl_rx_phy_res *rx_resp)
225{
226 /* data from PHY/DSP regarding signal strength, etc.,
227 * contents are always there, not configurable by host
228 */
229 struct iwl5000_non_cfg_phy *ncphy =
230 (struct iwl5000_non_cfg_phy *)rx_resp->non_cfg_phy_buf;
231 u32 val, rssi_a, rssi_b, rssi_c, max_rssi;
232 u8 agc;
233
234 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_AGC_IDX]);
235 agc = (val & IWL50_OFDM_AGC_MSK) >> IWL50_OFDM_AGC_BIT_POS;
236
237 /* Find max rssi among 3 possible receivers.
238 * These values are measured by the digital signal processor (DSP).
239 * They should stay fairly constant even as the signal strength varies,
240 * if the radio's automatic gain control (AGC) is working right.
241 * AGC value (see below) will provide the "interesting" info.
242 */
243 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_AB_IDX]);
244 rssi_a = (val & IWL50_OFDM_RSSI_A_MSK) >> IWL50_OFDM_RSSI_A_BIT_POS;
245 rssi_b = (val & IWL50_OFDM_RSSI_B_MSK) >> IWL50_OFDM_RSSI_B_BIT_POS;
246 val = le32_to_cpu(ncphy->non_cfg_phy[IWL50_RX_RES_RSSI_C_IDX]);
247 rssi_c = (val & IWL50_OFDM_RSSI_C_MSK) >> IWL50_OFDM_RSSI_C_BIT_POS;
248
249 max_rssi = max_t(u32, rssi_a, rssi_b);
250 max_rssi = max_t(u32, max_rssi, rssi_c);
251
252 IWL_DEBUG_STATS(priv, "Rssi In A %d B %d C %d Max %d AGC dB %d\n",
253 rssi_a, rssi_b, rssi_c, max_rssi, agc);
254
255 /* dBm = max_rssi dB - agc dB - constant.
256 * Higher AGC (higher radio gain) means lower signal. */
257 return max_rssi - agc - IWLAGN_RSSI_OFFSET;
258}
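/*
 * Editor's worked example (not part of the patch): with the DSP
 * reporting rssi_a = 97, rssi_b = 94, rssi_c = 90 and agc = 80,
 * max_rssi = 97 and the returned signal level is
 *
 *	97 - 80 - IWLAGN_RSSI_OFFSET(44) = -27 dBm
 *
 * i.e. higher radio gain (higher AGC) yields a lower reported signal.
 */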
259
260struct iwl_hcmd_ops iwlagn_hcmd = {
261 .rxon_assoc = iwlagn_send_rxon_assoc,
262 .commit_rxon = iwl_commit_rxon,
263 .set_rxon_chain = iwl_set_rxon_chain,
264 .set_tx_ant = iwlagn_send_tx_ant_config,
265};
266
267struct iwl_hcmd_utils_ops iwlagn_hcmd_utils = {
268 .get_hcmd_size = iwlagn_get_hcmd_size,
269 .build_addsta_hcmd = iwlagn_build_addsta_hcmd,
270 .gain_computation = iwlagn_gain_computation,
271 .chain_noise_reset = iwlagn_chain_noise_reset,
272 .rts_tx_cmd_flag = iwlagn_rts_tx_cmd_flag,
273 .calc_rssi = iwlagn_calc_rssi,
274};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hw.h b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
new file mode 100644
index 000000000000..f9a3fbb6338f
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hw.h
@@ -0,0 +1,118 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63/*
64 * Please use this file (iwl-agn-hw.h) only for hardware-related definitions.
65 */
66
67#ifndef __iwl_agn_hw_h__
68#define __iwl_agn_hw_h__
69
70#define IWLAGN_RTC_INST_LOWER_BOUND (0x000000)
71#define IWLAGN_RTC_INST_UPPER_BOUND (0x020000)
72
73#define IWLAGN_RTC_DATA_LOWER_BOUND (0x800000)
74#define IWLAGN_RTC_DATA_UPPER_BOUND (0x80C000)
75
76#define IWLAGN_RTC_INST_SIZE (IWLAGN_RTC_INST_UPPER_BOUND - \
77 IWLAGN_RTC_INST_LOWER_BOUND)
78#define IWLAGN_RTC_DATA_SIZE (IWLAGN_RTC_DATA_UPPER_BOUND - \
79 IWLAGN_RTC_DATA_LOWER_BOUND)
80
81/* RSSI to dBm */
82#define IWLAGN_RSSI_OFFSET 44
83
84/* PCI registers */
85#define PCI_CFG_RETRY_TIMEOUT 0x041
86
87/* PCI register values */
88#define PCI_CFG_LINK_CTRL_VAL_L0S_EN 0x01
89#define PCI_CFG_LINK_CTRL_VAL_L1_EN 0x02
90
91#define IWLAGN_DEFAULT_TX_RETRY 15
92
93/* Limit range of txpower output target to be between these values */
94#define IWLAGN_TX_POWER_TARGET_POWER_MIN (0) /* 0 dBm: 1 milliwatt */
95#define IWLAGN_TX_POWER_TARGET_POWER_MAX (16) /* 16 dBm */
96
97/* EEPROM */
98#define IWLAGN_EEPROM_IMG_SIZE 2048
99
100#define IWLAGN_CMD_FIFO_NUM 7
101#define IWLAGN_NUM_QUEUES 20
102#define IWLAGN_NUM_AMPDU_QUEUES 10
103#define IWLAGN_FIRST_AMPDU_QUEUE 10
104
105/* Fixed (non-configurable) rx data from phy */
106
107/**
108 * struct iwlagn_scd_bc_tbl - scheduler byte count table
109 * base physical address provided by SCD_DRAM_BASE_ADDR
110 * @tfd_offset 0-12 - tx command byte count
111 * 12-16 - station index
112 */
113struct iwlagn_scd_bc_tbl {
114 __le16 tfd_offset[TFD_QUEUE_BC_SIZE];
115} __attribute__ ((packed));
116
117
118#endif /* __iwl_agn_hw_h__ */
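Editor's aside (not part of the patch): plugging the bounds above into the size macros gives

	IWLAGN_RTC_INST_SIZE = 0x020000 - 0x000000 = 0x20000  (128 KiB)
	IWLAGN_RTC_DATA_SIZE = 0x80C000 - 0x800000 = 0x0C000  ( 48 KiB)

and the data-address check defined later in iwl-agn-lib.c is half-open: iwlagn_hw_valid_rtc_data_addr(0x800000) is true, while iwlagn_hw_valid_rtc_data_addr(0x80C000) is false because the upper bound is exclusive.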
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ict.c b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
new file mode 100644
index 000000000000..a273e373b7b0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ict.c
@@ -0,0 +1,307 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <linux/module.h>
31#include <linux/etherdevice.h>
32#include <linux/sched.h>
33#include <net/mac80211.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-agn.h"
38#include "iwl-helpers.h"
39
40#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
41
42/* Free dram table */
43void iwl_free_isr_ict(struct iwl_priv *priv)
44{
45 if (priv->_agn.ict_tbl_vir) {
46 dma_free_coherent(&priv->pci_dev->dev,
47 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
48 priv->_agn.ict_tbl_vir,
49 priv->_agn.ict_tbl_dma);
50 priv->_agn.ict_tbl_vir = NULL;
51 }
52}
53
54
55/* Allocate the DRAM shared table; it is PAGE_SIZE aligned.
56 * Also reset all data related to the ICT table interrupt.
57 */
58int iwl_alloc_isr_ict(struct iwl_priv *priv)
59{
60
61 if (priv->cfg->use_isr_legacy)
62 return 0;
63	/* allocate shared data table */
64 priv->_agn.ict_tbl_vir =
65 dma_alloc_coherent(&priv->pci_dev->dev,
66 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
67 &priv->_agn.ict_tbl_dma, GFP_KERNEL);
68 if (!priv->_agn.ict_tbl_vir)
69 return -ENOMEM;
70
71	/* align table to PAGE_SIZE boundary */
72 priv->_agn.aligned_ict_tbl_dma = ALIGN(priv->_agn.ict_tbl_dma, PAGE_SIZE);
73
74 IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
75 (unsigned long long)priv->_agn.ict_tbl_dma,
76 (unsigned long long)priv->_agn.aligned_ict_tbl_dma,
77 (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
78
79 priv->_agn.ict_tbl = priv->_agn.ict_tbl_vir +
80 (priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma);
81
82 IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
83 priv->_agn.ict_tbl, priv->_agn.ict_tbl_vir,
84 (int)(priv->_agn.aligned_ict_tbl_dma - priv->_agn.ict_tbl_dma));
85
86 /* reset table and index to all 0 */
87	memset(priv->_agn.ict_tbl_vir, 0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
88 priv->_agn.ict_index = 0;
89
90 /* add periodic RX interrupt */
91 priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
92 return 0;
93}
94
95/* The device is going up: inform it that we are using the ICT
96 * interrupt table, and switch the driver over to ICT interrupts.
97 */
98int iwl_reset_ict(struct iwl_priv *priv)
99{
100 u32 val;
101 unsigned long flags;
102
103 if (!priv->_agn.ict_tbl_vir)
104 return 0;
105
106 spin_lock_irqsave(&priv->lock, flags);
107 iwl_disable_interrupts(priv);
108
109 memset(&priv->_agn.ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
110
111 val = priv->_agn.aligned_ict_tbl_dma >> PAGE_SHIFT;
112
113 val |= CSR_DRAM_INT_TBL_ENABLE;
114 val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
115
116 IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
117 "aligned dma address %Lx\n",
118 val, (unsigned long long)priv->_agn.aligned_ict_tbl_dma);
119
120 iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
121 priv->_agn.use_ict = true;
122 priv->_agn.ict_index = 0;
123 iwl_write32(priv, CSR_INT, priv->inta_mask);
124 iwl_enable_interrupts(priv);
125 spin_unlock_irqrestore(&priv->lock, flags);
126
127 return 0;
128}
129
130/* The device is going down: disable ICT interrupt usage */
131void iwl_disable_ict(struct iwl_priv *priv)
132{
133 unsigned long flags;
134
135 spin_lock_irqsave(&priv->lock, flags);
136 priv->_agn.use_ict = false;
137 spin_unlock_irqrestore(&priv->lock, flags);
138}
139
140static irqreturn_t iwl_isr(int irq, void *data)
141{
142 struct iwl_priv *priv = data;
143 u32 inta, inta_mask;
144 unsigned long flags;
145#ifdef CONFIG_IWLWIFI_DEBUG
146 u32 inta_fh;
147#endif
148 if (!priv)
149 return IRQ_NONE;
150
151 spin_lock_irqsave(&priv->lock, flags);
152
153 /* Disable (but don't clear!) interrupts here to avoid
154 * back-to-back ISRs and sporadic interrupts from our NIC.
155 * If we have something to service, the tasklet will re-enable ints.
156 * If we *don't* have something, we'll re-enable before leaving here. */
157 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
158 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
159
160 /* Discover which interrupts are active/pending */
161 inta = iwl_read32(priv, CSR_INT);
162
163 /* Ignore interrupt if there's nothing in NIC to service.
164 * This may be due to IRQ shared with another device,
165 * or due to sporadic interrupts thrown from our NIC. */
166 if (!inta) {
167 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
168 goto none;
169 }
170
171 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
172 /* Hardware disappeared. It might have already raised
173 * an interrupt */
174 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
175 goto unplugged;
176 }
177
178#ifdef CONFIG_IWLWIFI_DEBUG
179 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
180 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
181 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
182 "fh 0x%08x\n", inta, inta_mask, inta_fh);
183 }
184#endif
185
186 priv->_agn.inta |= inta;
187 /* iwl_irq_tasklet() will service interrupts and re-enable them */
188 if (likely(inta))
189 tasklet_schedule(&priv->irq_tasklet);
190 else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
191 iwl_enable_interrupts(priv);
192
193 unplugged:
194 spin_unlock_irqrestore(&priv->lock, flags);
195 return IRQ_HANDLED;
196
197 none:
198 /* re-enable interrupts here since we don't have anything to service. */
199	/* only re-enable if disabled by irq and no tasklet was scheduled. */
200 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
201 iwl_enable_interrupts(priv);
202
203 spin_unlock_irqrestore(&priv->lock, flags);
204 return IRQ_NONE;
205}
206
207/* Interrupt handler using the ICT table. With this handler the driver
208 * stops reading the INTA register to discover the device's interrupts,
209 * since reading that register is expensive. Instead, the device writes
210 * its interrupts into the ICT DRAM table, increments its index and fires
211 * an interrupt; the driver then ORs all ICT table entries from the
212 * current index up to the first zero entry. The result is the interrupt
213 * to service; the driver sets the entries back to 0 and updates the index.
214 */
215irqreturn_t iwl_isr_ict(int irq, void *data)
216{
217 struct iwl_priv *priv = data;
218 u32 inta, inta_mask;
219 u32 val = 0;
220 unsigned long flags;
221
222 if (!priv)
223 return IRQ_NONE;
224
225 /* dram interrupt table not set yet,
226 * use legacy interrupt.
227 */
228 if (!priv->_agn.use_ict)
229 return iwl_isr(irq, data);
230
231 spin_lock_irqsave(&priv->lock, flags);
232
233 /* Disable (but don't clear!) interrupts here to avoid
234 * back-to-back ISRs and sporadic interrupts from our NIC.
235 * If we have something to service, the tasklet will re-enable ints.
236 * If we *don't* have something, we'll re-enable before leaving here.
237 */
238 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
239 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
240
241
242 /* Ignore interrupt if there's nothing in NIC to service.
243 * This may be due to IRQ shared with another device,
244 * or due to sporadic interrupts thrown from our NIC. */
245 if (!priv->_agn.ict_tbl[priv->_agn.ict_index]) {
246 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
247 goto none;
248 }
249
250	/* read all entries that are not 0, starting at ict_index */
251 while (priv->_agn.ict_tbl[priv->_agn.ict_index]) {
252
253 val |= le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]);
254 IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
255 priv->_agn.ict_index,
256 le32_to_cpu(priv->_agn.ict_tbl[priv->_agn.ict_index]));
257 priv->_agn.ict_tbl[priv->_agn.ict_index] = 0;
258 priv->_agn.ict_index = iwl_queue_inc_wrap(priv->_agn.ict_index,
259 ICT_COUNT);
260
261 }
262
263 /* We should not get this value, just ignore it. */
264 if (val == 0xffffffff)
265 val = 0;
266
267 /*
268 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
269 * (bit 15 before shifting it to 31) to clear when using interrupt
270 * coalescing. fortunately, bits 18 and 19 stay set when this happens
271 * so we use them to decide on the real state of the Rx bit.
272	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
273 */
274 if (val & 0xC0000)
275 val |= 0x8000;
276
277 inta = (0xff & val) | ((0xff00 & val) << 16);
278 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
279 inta, inta_mask, val);
280
281 inta &= priv->inta_mask;
282 priv->_agn.inta |= inta;
283
284 /* iwl_irq_tasklet() will service interrupts and re-enable them */
285 if (likely(inta))
286 tasklet_schedule(&priv->irq_tasklet);
287 else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta) {
288		/* Re-enable interrupts only if they were disabled by this
289		 * handler and no tasklet was scheduled; if a tasklet is
290		 * pending, it will re-enable interrupts itself.
291 */
292 iwl_enable_interrupts(priv);
293 }
294
295 spin_unlock_irqrestore(&priv->lock, flags);
296 return IRQ_HANDLED;
297
298 none:
299 /* re-enable interrupts here since we don't have anything to service.
300	 * only re-enable if disabled by irq.
301 */
302 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->_agn.inta)
303 iwl_enable_interrupts(priv);
304
305 spin_unlock_irqrestore(&priv->lock, flags);
306 return IRQ_NONE;
307}
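Editor's worked example (not part of the patch): suppose the OR of the ICT entries above produced val = 0x00040001. Bit 18 is set, so the hardware-bug workaround forces bit 15: val |= 0x8000 gives 0x00048001. The reconstruction then maps the low byte to INTA bits 0-7 and the second byte to bits 24-31:

	inta = (0xff & 0x00048001) | ((0xff00 & 0x00048001) << 16)
	     = 0x00000001 | 0x80000000
	     = 0x80000001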
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
new file mode 100644
index 000000000000..c465c8590833
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -0,0 +1,1113 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29#include <linux/etherdevice.h>
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-agn-hw.h"
40#include "iwl-agn.h"
41
42static inline u32 iwlagn_get_scd_ssn(struct iwl5000_tx_resp *tx_resp)
43{
44 return le32_to_cpup((__le32 *)&tx_resp->status +
45 tx_resp->frame_count) & MAX_SN;
46}
47
48static int iwlagn_tx_status_reply_tx(struct iwl_priv *priv,
49 struct iwl_ht_agg *agg,
50 struct iwl5000_tx_resp *tx_resp,
51 int txq_id, u16 start_idx)
52{
53 u16 status;
54 struct agg_tx_status *frame_status = &tx_resp->status;
55 struct ieee80211_tx_info *info = NULL;
56 struct ieee80211_hdr *hdr = NULL;
57 u32 rate_n_flags = le32_to_cpu(tx_resp->rate_n_flags);
58 int i, sh, idx;
59 u16 seq;
60
61 if (agg->wait_for_ba)
62 IWL_DEBUG_TX_REPLY(priv, "got tx response w/o block-ack\n");
63
64 agg->frame_count = tx_resp->frame_count;
65 agg->start_idx = start_idx;
66 agg->rate_n_flags = rate_n_flags;
67 agg->bitmap = 0;
68
69 /* # frames attempted by Tx command */
70 if (agg->frame_count == 1) {
71 /* Only one frame was attempted; no block-ack will arrive */
72 status = le16_to_cpu(frame_status[0].status);
73 idx = start_idx;
74
75 /* FIXME: code repetition */
76 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, StartIdx=%d idx=%d\n",
77 agg->frame_count, agg->start_idx, idx);
78
79 info = IEEE80211_SKB_CB(priv->txq[txq_id].txb[idx].skb[0]);
80 info->status.rates[0].count = tx_resp->failure_frame + 1;
81 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
82 info->flags |= iwl_tx_status_to_mac80211(status);
83 iwlagn_hwrate_to_tx_control(priv, rate_n_flags, info);
84
85 /* FIXME: code repetition end */
86
87 IWL_DEBUG_TX_REPLY(priv, "1 Frame 0x%x failure :%d\n",
88 status & 0xff, tx_resp->failure_frame);
89 IWL_DEBUG_TX_REPLY(priv, "Rate Info rate_n_flags=%x\n", rate_n_flags);
90
91 agg->wait_for_ba = 0;
92 } else {
93 /* Two or more frames were attempted; expect block-ack */
94 u64 bitmap = 0;
95 int start = agg->start_idx;
96
97 /* Construct bit-map of pending frames within Tx window */
98 for (i = 0; i < agg->frame_count; i++) {
99 u16 sc;
100 status = le16_to_cpu(frame_status[i].status);
101 seq = le16_to_cpu(frame_status[i].sequence);
102 idx = SEQ_TO_INDEX(seq);
103 txq_id = SEQ_TO_QUEUE(seq);
104
105 if (status & (AGG_TX_STATE_FEW_BYTES_MSK |
106 AGG_TX_STATE_ABORT_MSK))
107 continue;
108
109 IWL_DEBUG_TX_REPLY(priv, "FrameCnt = %d, txq_id=%d idx=%d\n",
110 agg->frame_count, txq_id, idx);
111
112 hdr = iwl_tx_queue_get_hdr(priv, txq_id, idx);
113 if (!hdr) {
114 IWL_ERR(priv,
115 "BUG_ON idx doesn't point to valid skb"
116 " idx=%d, txq_id=%d\n", idx, txq_id);
117 return -1;
118 }
119
120 sc = le16_to_cpu(hdr->seq_ctrl);
121 if (idx != (SEQ_TO_SN(sc) & 0xff)) {
122 IWL_ERR(priv,
123 "BUG_ON idx doesn't match seq control"
124 " idx=%d, seq_idx=%d, seq=%d\n",
125 idx, SEQ_TO_SN(sc),
126 hdr->seq_ctrl);
127 return -1;
128 }
129
130 IWL_DEBUG_TX_REPLY(priv, "AGG Frame i=%d idx %d seq=%d\n",
131 i, idx, SEQ_TO_SN(sc));
132
133 sh = idx - start;
134 if (sh > 64) {
135 sh = (start - idx) + 0xff;
136 bitmap = bitmap << sh;
137 sh = 0;
138 start = idx;
139 } else if (sh < -64)
140 sh = 0xff - (start - idx);
141 else if (sh < 0) {
142 sh = start - idx;
143 start = idx;
144 bitmap = bitmap << sh;
145 sh = 0;
146 }
147 bitmap |= 1ULL << sh;
148 IWL_DEBUG_TX_REPLY(priv, "start=%d bitmap=0x%llx\n",
149 start, (unsigned long long)bitmap);
150 }
151
152 agg->bitmap = bitmap;
153 agg->start_idx = start;
154 IWL_DEBUG_TX_REPLY(priv, "Frames %d start_idx=%d bitmap=0x%llx\n",
155 agg->frame_count, agg->start_idx,
156 (unsigned long long)agg->bitmap);
157
158 if (bitmap)
159 agg->wait_for_ba = 1;
160 }
161 return 0;
162}
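/*
 * Editor's worked example (not part of the patch): with start = 10 and
 * frames reported at idx 10, 11 and 13, the shifts are sh = 0, 1 and 3,
 * so the loop above builds
 *
 *	bitmap = (1ULL << 0) | (1ULL << 1) | (1ULL << 3) = 0xB
 *
 * where bit n marks the frame n slots past agg->start_idx that still
 * awaits its block-ack.
 */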
163
164void iwl_check_abort_status(struct iwl_priv *priv,
165 u8 frame_count, u32 status)
166{
167 if (frame_count == 1 && status == TX_STATUS_FAIL_RFKILL_FLUSH) {
168 IWL_ERR(priv, "TODO: Implement Tx flush command!!!\n");
169 }
170}
171
172static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
173 struct iwl_rx_mem_buffer *rxb)
174{
175 struct iwl_rx_packet *pkt = rxb_addr(rxb);
176 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
177 int txq_id = SEQ_TO_QUEUE(sequence);
178 int index = SEQ_TO_INDEX(sequence);
179 struct iwl_tx_queue *txq = &priv->txq[txq_id];
180 struct ieee80211_tx_info *info;
181 struct iwl5000_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
182 u32 status = le16_to_cpu(tx_resp->status.status);
183 int tid;
184 int sta_id;
185 int freed;
186
187 if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
188 IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
189 "is out of range [0-%d] %d %d\n", txq_id,
190 index, txq->q.n_bd, txq->q.write_ptr,
191 txq->q.read_ptr);
192 return;
193 }
194
195 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb[0]);
196 memset(&info->status, 0, sizeof(info->status));
197
198 tid = (tx_resp->ra_tid & IWL50_TX_RES_TID_MSK) >> IWL50_TX_RES_TID_POS;
199 sta_id = (tx_resp->ra_tid & IWL50_TX_RES_RA_MSK) >> IWL50_TX_RES_RA_POS;
200
201 if (txq->sched_retry) {
202 const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
203 struct iwl_ht_agg *agg = NULL;
204
205 agg = &priv->stations[sta_id].tid[tid].agg;
206
207 iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
208
209 /* check if BAR is needed */
210 if ((tx_resp->frame_count == 1) && !iwl_is_tx_success(status))
211 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
212
213 if (txq->q.read_ptr != (scd_ssn & 0xff)) {
214 index = iwl_queue_dec_wrap(scd_ssn & 0xff, txq->q.n_bd);
215 IWL_DEBUG_TX_REPLY(priv, "Retry scheduler reclaim "
216 "scd_ssn=%d idx=%d txq=%d swq=%d\n",
217 scd_ssn , index, txq_id, txq->swq_id);
218
219 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
220 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
221
222 if (priv->mac80211_registered &&
223 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
224 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) {
225 if (agg->state == IWL_AGG_OFF)
226 iwl_wake_queue(priv, txq_id);
227 else
228 iwl_wake_queue(priv, txq->swq_id);
229 }
230 }
231 } else {
232 BUG_ON(txq_id != txq->swq_id);
233
234 info->status.rates[0].count = tx_resp->failure_frame + 1;
235 info->flags |= iwl_tx_status_to_mac80211(status);
236 iwlagn_hwrate_to_tx_control(priv,
237 le32_to_cpu(tx_resp->rate_n_flags),
238 info);
239
240 IWL_DEBUG_TX_REPLY(priv, "TXQ %d status %s (0x%08x) rate_n_flags "
241 "0x%x retries %d\n",
242 txq_id,
243 iwl_get_tx_fail_reason(status), status,
244 le32_to_cpu(tx_resp->rate_n_flags),
245 tx_resp->failure_frame);
246
247 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
248 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
249
250 if (priv->mac80211_registered &&
251 (iwl_queue_space(&txq->q) > txq->q.low_mark))
252 iwl_wake_queue(priv, txq_id);
253 }
254
255 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
256
257 iwl_check_abort_status(priv, tx_resp->frame_count, status);
258}
259
260void iwlagn_rx_handler_setup(struct iwl_priv *priv)
261{
262 /* init calibration handlers */
263 priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
264 iwlagn_rx_calib_result;
265 priv->rx_handlers[CALIBRATION_COMPLETE_NOTIFICATION] =
266 iwlagn_rx_calib_complete;
267 priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
268}
269
270void iwlagn_setup_deferred_work(struct iwl_priv *priv)
271{
272 /* in agn, the tx power calibration is done in uCode */
273 priv->disable_tx_power_cal = 1;
274}
275
276int iwlagn_hw_valid_rtc_data_addr(u32 addr)
277{
278 return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
279 (addr < IWLAGN_RTC_DATA_UPPER_BOUND);
280}
281
282int iwlagn_send_tx_power(struct iwl_priv *priv)
283{
284 struct iwl5000_tx_power_dbm_cmd tx_power_cmd;
285 u8 tx_ant_cfg_cmd;
286
287 /* half dBm need to multiply */
288 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
289
290 if (priv->tx_power_lmt_in_half_dbm &&
291 priv->tx_power_lmt_in_half_dbm < tx_power_cmd.global_lmt) {
292 /*
293	 * For the newer devices, which use an enhanced/extended tx power
294	 * table in EEPROM, the format is in half dBm. The driver needs to
295	 * convert to dBm format before reporting to mac80211.
296	 * Doing so can lose 1/2 dBm of resolution: the driver performs a
297	 * "round-up" operation before reporting, and that can push the
298	 * tx power 1/2 dBm over the regulatory limit. Perform the check
299	 * here: if "tx_power_user_lmt" is higher than the EEPROM value
300	 * (in half-dBm format), lower the tx power based on the EEPROM
301	 * value.
302 */
303 tx_power_cmd.global_lmt = priv->tx_power_lmt_in_half_dbm;
304 }
305 tx_power_cmd.flags = IWL50_TX_POWER_NO_CLOSED;
306 tx_power_cmd.srv_chan_lmt = IWL50_TX_POWER_AUTO;
307
308 if (IWL_UCODE_API(priv->ucode_ver) == 1)
309 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD_V1;
310 else
311 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
312
313 return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd,
314 sizeof(tx_power_cmd), &tx_power_cmd,
315 NULL);
316}
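/*
 * Editor's worked example (not part of the patch): with
 * tx_power_user_lmt = 15 dBm the command carries
 * global_lmt = 2 * 15 = 30 half-dBm.  If the enhanced EEPROM limit is
 * tx_power_lmt_in_half_dbm = 29 (14.5 dBm), it is below the requested
 * 30, so global_lmt is clamped to 29 and the rounded-up half dB can
 * never push the device over the regulatory limit.
 */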
317
318void iwlagn_temperature(struct iwl_priv *priv)
319{
320 /* store temperature from statistics (in Celsius) */
321 priv->temperature = le32_to_cpu(priv->statistics.general.temperature);
322 iwl_tt_handler(priv);
323}
324
325u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv)
326{
327 struct iwl_eeprom_calib_hdr {
328 u8 version;
329 u8 pa_type;
330 u16 voltage;
331 } *hdr;
332
333 hdr = (struct iwl_eeprom_calib_hdr *)iwl_eeprom_query_addr(priv,
334 EEPROM_5000_CALIB_ALL);
335 return hdr->version;
336
337}
338
339/*
340 * EEPROM
341 */
342static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
343{
344 u16 offset = 0;
345
346 if ((address & INDIRECT_ADDRESS) == 0)
347 return address;
348
349 switch (address & INDIRECT_TYPE_MSK) {
350 case INDIRECT_HOST:
351 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST);
352 break;
353 case INDIRECT_GENERAL:
354 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_GENERAL);
355 break;
356 case INDIRECT_REGULATORY:
357 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_REGULATORY);
358 break;
359 case INDIRECT_CALIBRATION:
360 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_CALIBRATION);
361 break;
362 case INDIRECT_PROCESS_ADJST:
363 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_PROCESS_ADJST);
364 break;
365 case INDIRECT_OTHERS:
366 offset = iwl_eeprom_query16(priv, EEPROM_5000_LINK_OTHERS);
367 break;
368 default:
369 IWL_ERR(priv, "illegal indirect type: 0x%X\n",
370 address & INDIRECT_TYPE_MSK);
371 break;
372 }
373
374 /* translate the offset from words to byte */
375 return (address & ADDRESS_MSK) + (offset << 1);
376}
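/*
 * Editor's worked example (not part of the patch; the exact constants
 * live in iwl-eeprom.h): for an INDIRECT_HOST address whose low bits
 * (address & ADDRESS_MSK) are 0x10, and a link entry
 * iwl_eeprom_query16(priv, EEPROM_5000_LINK_HOST) == 0x64 (a *word*
 * offset), the returned byte address is
 *
 *	0x10 + (0x64 << 1) = 0x10 + 0xC8 = 0xD8
 */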
377
378const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
379 size_t offset)
380{
381 u32 address = eeprom_indirect_address(priv, offset);
382 BUG_ON(address >= priv->cfg->eeprom_size);
383 return &priv->eeprom[address];
384}
385
386struct iwl_mod_params iwlagn_mod_params = {
387 .amsdu_size_8K = 1,
388 .restart_fw = 1,
389 /* the rest are 0 by default */
390};
391
392void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
393{
394 unsigned long flags;
395 int i;
396 spin_lock_irqsave(&rxq->lock, flags);
397 INIT_LIST_HEAD(&rxq->rx_free);
398 INIT_LIST_HEAD(&rxq->rx_used);
399 /* Fill the rx_used queue with _all_ of the Rx buffers */
400 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
401 /* In the reset function, these buffers may have been allocated
402 * to an SKB, so we need to unmap and free potential storage */
403 if (rxq->pool[i].page != NULL) {
404 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
405 PAGE_SIZE << priv->hw_params.rx_page_order,
406 PCI_DMA_FROMDEVICE);
407 __iwl_free_pages(priv, rxq->pool[i].page);
408 rxq->pool[i].page = NULL;
409 }
410 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
411 }
412
413 for (i = 0; i < RX_QUEUE_SIZE; i++)
414 rxq->queue[i] = NULL;
415
416 /* Set us so that we have processed and used all buffers, but have
417 * not restocked the Rx queue with fresh buffers */
418 rxq->read = rxq->write = 0;
419 rxq->write_actual = 0;
420 rxq->free_count = 0;
421 spin_unlock_irqrestore(&rxq->lock, flags);
422}
423
424int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
425{
426 u32 rb_size;
427 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
428 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
429
430 if (!priv->cfg->use_isr_legacy)
431 rb_timeout = RX_RB_TIMEOUT;
432
433 if (priv->cfg->mod_params->amsdu_size_8K)
434 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
435 else
436 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
437
438 /* Stop Rx DMA */
439 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
440
441 /* Reset driver's Rx queue write index */
442 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
443
444 /* Tell device where to find RBD circular buffer in DRAM */
445 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
446 (u32)(rxq->dma_addr >> 8));
447
448 /* Tell device where in DRAM to update its Rx status */
449 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
450 rxq->rb_stts_dma >> 4);
451
452 /* Enable Rx DMA
453 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
454 * the credit mechanism in 5000 HW RX FIFO
455 * Direct rx interrupts to hosts
456 * Rx buffer size 4 or 8k
457 * RB timeout 0x10
458 * 256 RBDs
459 */
460 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
461 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
462 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
463 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
464 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
465 rb_size|
466 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
467 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
468
469 /* Set interrupt coalescing timer to default (2048 usecs) */
470 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
471
472 return 0;
473}
474
475int iwlagn_hw_nic_init(struct iwl_priv *priv)
476{
477 unsigned long flags;
478 struct iwl_rx_queue *rxq = &priv->rxq;
479 int ret;
480
481 /* nic_init */
482 spin_lock_irqsave(&priv->lock, flags);
483 priv->cfg->ops->lib->apm_ops.init(priv);
484
485 /* Set interrupt coalescing calibration timer to default (512 usecs) */
486 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
487
488 spin_unlock_irqrestore(&priv->lock, flags);
489
490 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
491
492 priv->cfg->ops->lib->apm_ops.config(priv);
493
494 /* Allocate the RX queue, or reset if it is already allocated */
495 if (!rxq->bd) {
496 ret = iwl_rx_queue_alloc(priv);
497 if (ret) {
498 IWL_ERR(priv, "Unable to initialize Rx queue\n");
499 return -ENOMEM;
500 }
501 } else
502 iwlagn_rx_queue_reset(priv, rxq);
503
504 iwlagn_rx_replenish(priv);
505
506 iwlagn_rx_init(priv, rxq);
507
508 spin_lock_irqsave(&priv->lock, flags);
509
510 rxq->need_update = 1;
511 iwl_rx_queue_update_write_ptr(priv, rxq);
512
513 spin_unlock_irqrestore(&priv->lock, flags);
514
515 /* Allocate or reset and init all Tx and Command queues */
516 if (!priv->txq) {
517 ret = iwlagn_txq_ctx_alloc(priv);
518 if (ret)
519 return ret;
520 } else
521 iwlagn_txq_ctx_reset(priv);
522
523 set_bit(STATUS_INIT, &priv->status);
524
525 return 0;
526}
527
528/**
529 * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
530 */
531static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
532 dma_addr_t dma_addr)
533{
534 return cpu_to_le32((u32)(dma_addr >> 8));
535}
536
537/**
538 * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
539 *
540 * If there are slots in the RX queue that need to be restocked,
541 * and we have free pre-allocated buffers, fill the ranks as much
542 * as we can, pulling from rx_free.
543 *
544 * This moves the 'write' index forward to catch up with 'processed', and
545 * also updates the memory address in the firmware to reference the new
546 * target buffer.
547 */
548void iwlagn_rx_queue_restock(struct iwl_priv *priv)
549{
550 struct iwl_rx_queue *rxq = &priv->rxq;
551 struct list_head *element;
552 struct iwl_rx_mem_buffer *rxb;
553 unsigned long flags;
554
555 spin_lock_irqsave(&rxq->lock, flags);
556 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
557 /* The overwritten rxb must be a used one */
558 rxb = rxq->queue[rxq->write];
559 BUG_ON(rxb && rxb->page);
560
561 /* Get next free Rx buffer, remove from free list */
562 element = rxq->rx_free.next;
563 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
564 list_del(element);
565
566 /* Point to Rx buffer via next RBD in circular buffer */
567 rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
568 rxb->page_dma);
569 rxq->queue[rxq->write] = rxb;
570 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
571 rxq->free_count--;
572 }
573 spin_unlock_irqrestore(&rxq->lock, flags);
574 /* If the pre-allocated buffer pool is dropping low, schedule to
575 * refill it */
576 if (rxq->free_count <= RX_LOW_WATERMARK)
577 queue_work(priv->workqueue, &priv->rx_replenish);
578
579
580 /* If we've added more space for the firmware to place data, tell it.
581 * Increment device's write pointer in multiples of 8. */
582 if (rxq->write_actual != (rxq->write & ~0x7)) {
583 spin_lock_irqsave(&rxq->lock, flags);
584 rxq->need_update = 1;
585 spin_unlock_irqrestore(&rxq->lock, flags);
586 iwl_rx_queue_update_write_ptr(priv, rxq);
587 }
588}
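
/*
 * User-space sketch of the "multiples of 8" rule applied at the end of
 * iwlagn_rx_queue_restock() above: the device is only told about a new
 * write pointer once the driver has advanced past the last value handed
 * over, rounded down to a multiple of 8.  Field names mirror the driver's,
 * but the struct here is local to the sketch.
 */
#include <stdio.h>

struct rxq_sketch {
	unsigned int write;        /* driver's current write index */
	unsigned int write_actual; /* last index given to the device */
};

static int device_needs_update(const struct rxq_sketch *q)
{
	return q->write_actual != (q->write & ~0x7u);
}

int main(void)
{
	struct rxq_sketch q = { .write = 0, .write_actual = 0 };

	for (q.write = 0; q.write < 20; q.write++)
		if (device_needs_update(&q)) {
			q.write_actual = q.write & ~0x7u;
			printf("write=%u -> tell device %u\n",
			       q.write, q.write_actual);
		}
	return 0; /* prints updates at write = 8 and 16 only */
}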
589
590/**
591 * iwlagn_rx_allocate - allocate a page for each used RBD
592 *
593 * Moves each used buffer from rx_used to rx_free, allocating a page for it.
594 *
595 * Restocking the queue itself is done separately by iwlagn_rx_queue_restock(),
596 * called from iwlagn_rx_replenish() (a work item, except at initialization).
597 */
598void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
599{
600 struct iwl_rx_queue *rxq = &priv->rxq;
601 struct list_head *element;
602 struct iwl_rx_mem_buffer *rxb;
603 struct page *page;
604 unsigned long flags;
605 gfp_t gfp_mask = priority;
606
607 while (1) {
608 spin_lock_irqsave(&rxq->lock, flags);
609 if (list_empty(&rxq->rx_used)) {
610 spin_unlock_irqrestore(&rxq->lock, flags);
611 return;
612 }
613 spin_unlock_irqrestore(&rxq->lock, flags);
614
615 if (rxq->free_count > RX_LOW_WATERMARK)
616 gfp_mask |= __GFP_NOWARN;
617
618 if (priv->hw_params.rx_page_order > 0)
619 gfp_mask |= __GFP_COMP;
620
621 /* Alloc a new receive buffer */
622 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
623 if (!page) {
624 if (net_ratelimit())
625 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
626 "order: %d\n",
627 priv->hw_params.rx_page_order);
628
629 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
630 net_ratelimit())
631 IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
632 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
633 rxq->free_count);
634 /* We don't reschedule replenish work here -- we will
635 * call the restock method and if it still needs
636 * more buffers it will schedule replenish */
637 return;
638 }
639
640 spin_lock_irqsave(&rxq->lock, flags);
641
642 if (list_empty(&rxq->rx_used)) {
643 spin_unlock_irqrestore(&rxq->lock, flags);
644 __free_pages(page, priv->hw_params.rx_page_order);
645 return;
646 }
647 element = rxq->rx_used.next;
648 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
649 list_del(element);
650
651 spin_unlock_irqrestore(&rxq->lock, flags);
652
653 BUG_ON(rxb->page);
654 rxb->page = page;
655 /* Get physical address of the RB */
656 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
657 PAGE_SIZE << priv->hw_params.rx_page_order,
658 PCI_DMA_FROMDEVICE);
659 /* dma address must be no more than 36 bits */
660 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
661 /* and also 256 byte aligned! */
662 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
663
664 spin_lock_irqsave(&rxq->lock, flags);
665
666 list_add_tail(&rxb->list, &rxq->rx_free);
667 rxq->free_count++;
668 priv->alloc_rxb_page++;
669
670 spin_unlock_irqrestore(&rxq->lock, flags);
671 }
672}
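
/*
 * Sketch of the two DMA-address invariants asserted in the allocation loop
 * above.  DMA_BIT_MASK() is reproduced from the kernel headers; the sample
 * address is made up.  The RBD holds the address shifted right by 8, so
 * the address may use at most 36 bits and its low 8 bits must be zero
 * (256-byte alignment).
 */
#include <assert.h>
#include <stdint.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static void check_rx_dma_addr(uint64_t page_dma)
{
	assert(!(page_dma & ~DMA_BIT_MASK(36))); /* fits in 36 bits */
	assert(!(page_dma & DMA_BIT_MASK(8)));   /* 256-byte aligned */
}

int main(void)
{
	check_rx_dma_addr(0xfdeadbe00ULL); /* hypothetical, valid address */
	return 0;
}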
673
674void iwlagn_rx_replenish(struct iwl_priv *priv)
675{
676 unsigned long flags;
677
678 iwlagn_rx_allocate(priv, GFP_KERNEL);
679
680 spin_lock_irqsave(&priv->lock, flags);
681 iwlagn_rx_queue_restock(priv);
682 spin_unlock_irqrestore(&priv->lock, flags);
683}
684
685void iwlagn_rx_replenish_now(struct iwl_priv *priv)
686{
687 iwlagn_rx_allocate(priv, GFP_ATOMIC);
688
689 iwlagn_rx_queue_restock(priv);
690}
691
692/* Assumes that the page field of the buffers in 'pool' is kept accurate.
693 * If a page has been detached (handed to mac80211), the pool entry needs
694 * to have its page set to NULL. This free routine walks the pool entries
695 * and, for each non-NULL page, unmaps and frees it.
696 */
697void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
698{
699 int i;
700 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
701 if (rxq->pool[i].page != NULL) {
702 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
703 PAGE_SIZE << priv->hw_params.rx_page_order,
704 PCI_DMA_FROMDEVICE);
705 __iwl_free_pages(priv, rxq->pool[i].page);
706 rxq->pool[i].page = NULL;
707 }
708 }
709
710 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
711 rxq->dma_addr);
712 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
713 rxq->rb_stts, rxq->rb_stts_dma);
714 rxq->bd = NULL;
715 rxq->rb_stts = NULL;
716}
717
718int iwlagn_rxq_stop(struct iwl_priv *priv)
719{
720
721 /* stop Rx DMA */
722 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
723 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
724 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
725
726 return 0;
727}
728
729int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
730{
731 int idx = 0;
732 int band_offset = 0;
733
734 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
735 if (rate_n_flags & RATE_MCS_HT_MSK) {
736 idx = (rate_n_flags & 0xff);
737 return idx;
738 /* Legacy rate format, search for match in table */
739 } else {
740 if (band == IEEE80211_BAND_5GHZ)
741 band_offset = IWL_FIRST_OFDM_RATE;
742 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
743 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
744 return idx - band_offset;
745 }
746
747 return -1;
748}
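
/*
 * Illustrative stand-alone sketch of the rate_n_flags decoding above.
 * SKETCH_RATE_MCS_HT_MSK is a stand-in bit chosen for the example; the
 * real mask is defined in the driver's command headers.  For HT frames
 * mac80211 only wants the MCS number, which is the low byte; legacy
 * frames instead need the PLCP-table search shown in the function.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_RATE_MCS_HT_MSK 0x00000100u /* stand-in, not the real value */

static int sketch_hwrate_to_idx(uint32_t rate_n_flags)
{
	if (rate_n_flags & SKETCH_RATE_MCS_HT_MSK)
		return rate_n_flags & 0xff; /* MCS number is the LSB */
	return -1; /* legacy: would search the PLCP rate table here */
}

int main(void)
{
	/* an HT frame at MCS 7 decodes straight to index 7 */
	printf("MCS %d\n", sketch_hwrate_to_idx(SKETCH_RATE_MCS_HT_MSK | 7));
	return 0;
}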
749
750/* Calc max signal level (dBm) among 3 possible receivers */
751static inline int iwlagn_calc_rssi(struct iwl_priv *priv,
752 struct iwl_rx_phy_res *rx_resp)
753{
754 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
755}
756
757#ifdef CONFIG_IWLWIFI_DEBUG
758/**
759 * iwlagn_dbg_report_frame - dump frame to syslog during debug sessions
760 *
761 * You may hack this function to show different aspects of received frames,
762 * including selective frame dumps.
763 * group100 parameter selects whether to show 1 out of 100 good data frames.
764 * All beacon and probe response frames are printed.
765 */
766static void iwlagn_dbg_report_frame(struct iwl_priv *priv,
767 struct iwl_rx_phy_res *phy_res, u16 length,
768 struct ieee80211_hdr *header, int group100)
769{
770 u32 to_us;
771 u32 print_summary = 0;
772 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
773 u32 hundred = 0;
774 u32 dataframe = 0;
775 __le16 fc;
776 u16 seq_ctl;
777 u16 channel;
778 u16 phy_flags;
779 u32 rate_n_flags;
780 u32 tsf_low;
781 int rssi;
782
783 if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
784 return;
785
786 /* MAC header */
787 fc = header->frame_control;
788 seq_ctl = le16_to_cpu(header->seq_ctrl);
789
790 /* metadata */
791 channel = le16_to_cpu(phy_res->channel);
792 phy_flags = le16_to_cpu(phy_res->phy_flags);
793 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
794
795 /* signal statistics */
796 rssi = iwlagn_calc_rssi(priv, phy_res);
797 tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
798
799 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
800
801 /* if data frame is to us and all is good,
802 * (optionally) print summary for only 1 out of every 100 */
803 if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
804 cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
805 dataframe = 1;
806 if (!group100)
807 print_summary = 1; /* print each frame */
808 else if (priv->framecnt_to_us < 100) {
809 priv->framecnt_to_us++;
810 print_summary = 0;
811 } else {
812 priv->framecnt_to_us = 0;
813 print_summary = 1;
814 hundred = 1;
815 }
816 } else {
817 /* print summary for all other frames */
818 print_summary = 1;
819 }
820
821 if (print_summary) {
822 char *title;
823 int rate_idx;
824 u32 bitrate;
825
826 if (hundred)
827 title = "100Frames";
828 else if (ieee80211_has_retry(fc))
829 title = "Retry";
830 else if (ieee80211_is_assoc_resp(fc))
831 title = "AscRsp";
832 else if (ieee80211_is_reassoc_resp(fc))
833 title = "RasRsp";
834 else if (ieee80211_is_probe_resp(fc)) {
835 title = "PrbRsp";
836 print_dump = 1; /* dump frame contents */
837 } else if (ieee80211_is_beacon(fc)) {
838 title = "Beacon";
839 print_dump = 1; /* dump frame contents */
840 } else if (ieee80211_is_atim(fc))
841 title = "ATIM";
842 else if (ieee80211_is_auth(fc))
843 title = "Auth";
844 else if (ieee80211_is_deauth(fc))
845 title = "DeAuth";
846 else if (ieee80211_is_disassoc(fc))
847 title = "DisAssoc";
848 else
849 title = "Frame";
850
851 rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
852 if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
853 bitrate = 0;
854 WARN_ON_ONCE(1);
855 } else {
856 bitrate = iwl_rates[rate_idx].ieee / 2;
857 }
858
859 /* print frame summary.
860 * MAC addresses show just the last byte (for brevity),
861 * but you can hack it to show more, if you'd like to. */
862 if (dataframe)
863 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
864 "len=%u, rssi=%d, chnl=%d, rate=%u,\n",
865 title, le16_to_cpu(fc), header->addr1[5],
866 length, rssi, channel, bitrate);
867 else {
868 /* src/dst addresses assume managed mode */
869 IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
870 "len=%u, rssi=%d, tim=%lu usec, "
871 "phy=0x%02x, chnl=%d\n",
872 title, le16_to_cpu(fc), header->addr1[5],
873 header->addr3[5], length, rssi,
874 tsf_low - priv->scan_start_tsf,
875 phy_flags, channel);
876 }
877 }
878 if (print_dump)
879 iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
880}
881#endif
882
883static u32 iwlagn_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
884{
885 u32 decrypt_out = 0;
886
887 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
888 RX_RES_STATUS_STATION_FOUND)
889 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
890 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
891
892 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
893
894 /* packet was not encrypted */
895 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
896 RX_RES_STATUS_SEC_TYPE_NONE)
897 return decrypt_out;
898
899 /* packet was encrypted with unknown alg */
900 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
901 RX_RES_STATUS_SEC_TYPE_ERR)
902 return decrypt_out;
903
904 /* decryption was not done in HW */
905 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
906 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
907 return decrypt_out;
908
909 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
910
911 case RX_RES_STATUS_SEC_TYPE_CCMP:
912 /* alg is CCM: check MIC only */
913 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
914 /* Bad MIC */
915 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
916 else
917 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
918
919 break;
920
921 case RX_RES_STATUS_SEC_TYPE_TKIP:
922 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
923 /* Bad TTAK */
924 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
925 break;
926 }
927 /* fall through if TTAK OK */
928 default:
929 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
930 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
931 else
932 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
933 break;
 934	}
935
936 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
937 decrypt_in, decrypt_out);
938
939 return decrypt_out;
940}
941
942static void iwlagn_pass_packet_to_mac80211(struct iwl_priv *priv,
943 struct ieee80211_hdr *hdr,
944 u16 len,
945 u32 ampdu_status,
946 struct iwl_rx_mem_buffer *rxb,
947 struct ieee80211_rx_status *stats)
948{
949 struct sk_buff *skb;
950 __le16 fc = hdr->frame_control;
951
952 /* We only process data packets if the interface is open */
953 if (unlikely(!priv->is_open)) {
954 IWL_DEBUG_DROP_LIMIT(priv,
955 "Dropping packet while interface is not open.\n");
956 return;
957 }
958
959 /* In case of HW accelerated crypto and bad decryption, drop */
960 if (!priv->cfg->mod_params->sw_crypto &&
961 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
962 return;
963
964 skb = dev_alloc_skb(128);
965 if (!skb) {
966 IWL_ERR(priv, "dev_alloc_skb failed\n");
967 return;
968 }
969
970 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
971
972 iwl_update_stats(priv, false, fc, len);
973 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
974
975 ieee80211_rx(priv->hw, skb);
976 priv->alloc_rxb_page--;
977 rxb->page = NULL;
978}
979
980/* Called for REPLY_RX (legacy ABG frames), or
981 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
982void iwlagn_rx_reply_rx(struct iwl_priv *priv,
983 struct iwl_rx_mem_buffer *rxb)
984{
985 struct ieee80211_hdr *header;
986 struct ieee80211_rx_status rx_status;
987 struct iwl_rx_packet *pkt = rxb_addr(rxb);
988 struct iwl_rx_phy_res *phy_res;
989 __le32 rx_pkt_status;
990 struct iwl4965_rx_mpdu_res_start *amsdu;
991 u32 len;
992 u32 ampdu_status;
993 u32 rate_n_flags;
994
 995	/*
996 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
997 * REPLY_RX: physical layer info is in this buffer
998 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
999 * command and cached in priv->last_phy_res
1000 *
1001 * Here we set up local variables depending on which command is
1002 * received.
1003 */
1004 if (pkt->hdr.cmd == REPLY_RX) {
1005 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
1006 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
1007 + phy_res->cfg_phy_cnt);
1008
1009 len = le16_to_cpu(phy_res->byte_count);
1010 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
1011 phy_res->cfg_phy_cnt + len);
1012 ampdu_status = le32_to_cpu(rx_pkt_status);
1013 } else {
1014 if (!priv->_agn.last_phy_res_valid) {
1015 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
1016 return;
1017 }
1018 phy_res = &priv->_agn.last_phy_res;
1019 amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
1020 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
1021 len = le16_to_cpu(amsdu->byte_count);
1022 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
1023 ampdu_status = iwlagn_translate_rx_status(priv,
1024 le32_to_cpu(rx_pkt_status));
1025 }
1026
1027 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
 1028		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
1029 phy_res->cfg_phy_cnt);
1030 return;
1031 }
1032
1033 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
1034 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1035 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1036 le32_to_cpu(rx_pkt_status));
1037 return;
1038 }
1039
1040 /* This will be used in several places later */
1041 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
1042
1043 /* rx_status carries information about the packet to mac80211 */
1044 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1045 rx_status.freq =
1046 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
1047 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1048 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1049 rx_status.rate_idx =
1050 iwlagn_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
1051 rx_status.flag = 0;
1052
 1053	/* TSF isn't reliable. In order to allow a smooth user experience,
 1054	 * this W/A doesn't propagate it to mac80211 */
1055 /*rx_status.flag |= RX_FLAG_TSFT;*/
1056
1057 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1058
1059 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1060 rx_status.signal = iwlagn_calc_rssi(priv, phy_res);
1061
1062#ifdef CONFIG_IWLWIFI_DEBUG
1063 /* Set "1" to report good data frames in groups of 100 */
1064 if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
1065 iwlagn_dbg_report_frame(priv, phy_res, len, header, 1);
1066#endif
1067 iwl_dbg_log_rx_data_frame(priv, len, header);
1068 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, TSF %llu\n",
1069 rx_status.signal, (unsigned long long)rx_status.mactime);
1070
1071 /*
1072 * "antenna number"
1073 *
1074 * It seems that the antenna field in the phy flags value
1075 * is actually a bit field. This is undefined by radiotap,
1076 * it wants an actual antenna number but I always get "7"
1077 * for most legacy frames I receive indicating that the
1078 * same frame was received on all three RX chains.
1079 *
1080 * I think this field should be removed in favor of a
1081 * new 802.11n radiotap field "RX chains" that is defined
1082 * as a bitmask.
1083 */
1084 rx_status.antenna =
1085 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1086 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1087
1088 /* set the preamble flag if appropriate */
1089 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1090 rx_status.flag |= RX_FLAG_SHORTPRE;
1091
1092 /* Set up the HT phy flags */
1093 if (rate_n_flags & RATE_MCS_HT_MSK)
1094 rx_status.flag |= RX_FLAG_HT;
1095 if (rate_n_flags & RATE_MCS_HT40_MSK)
1096 rx_status.flag |= RX_FLAG_40MHZ;
1097 if (rate_n_flags & RATE_MCS_SGI_MSK)
1098 rx_status.flag |= RX_FLAG_SHORT_GI;
1099
1100 iwlagn_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1101 rxb, &rx_status);
1102}
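
/*
 * Layout sketch for the two notifications handled above, derived from the
 * pointer arithmetic in the function (illustrative, not a hardware spec).
 * Offsets are into pkt->u.raw:
 *
 *   REPLY_RX:          [iwl_rx_phy_res][cfg_phy data][frame][rx_pkt_status]
 *   REPLY_RX_MPDU_CMD: [iwl4965_rx_mpdu_res_start][frame][rx_pkt_status]
 *                      (phy data was delivered earlier by REPLY_RX_PHY_CMD
 *                       and cached in priv->_agn.last_phy_res)
 */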
1103
1104/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1105 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1106void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
1107 struct iwl_rx_mem_buffer *rxb)
1108{
1109 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1110 priv->_agn.last_phy_res_valid = true;
1111 memcpy(&priv->_agn.last_phy_res, pkt->u.raw,
1112 sizeof(struct iwl_rx_phy_res));
1113}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 1460116d329f..f7d85a2173c8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -611,10 +611,6 @@ static u16 rs_get_supported_rates(struct iwl_lq_sta *lq_sta,
611 struct ieee80211_hdr *hdr, 611 struct ieee80211_hdr *hdr,
612 enum iwl_table_type rate_type) 612 enum iwl_table_type rate_type)
613{ 613{
614 if (hdr && is_multicast_ether_addr(hdr->addr1) &&
615 lq_sta->active_rate_basic)
616 return lq_sta->active_rate_basic;
617
618 if (is_legacy(rate_type)) { 614 if (is_legacy(rate_type)) {
619 return lq_sta->active_legacy_rate; 615 return lq_sta->active_legacy_rate;
620 } else { 616 } else {
@@ -775,6 +771,15 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
775 771
776 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); 772 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
777 773
774 /* Treat uninitialized rate scaling data same as non-existing. */
775 if (!lq_sta) {
776 IWL_DEBUG_RATE(priv, "Station rate scaling not created yet.\n");
777 return;
778 } else if (!lq_sta->drv) {
779 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
780 return;
781 }
782
778 if (!ieee80211_is_data(hdr->frame_control) || 783 if (!ieee80211_is_data(hdr->frame_control) ||
779 info->flags & IEEE80211_TX_CTL_NO_ACK) 784 info->flags & IEEE80211_TX_CTL_NO_ACK)
780 return; 785 return;
@@ -784,10 +789,6 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
784 !(info->flags & IEEE80211_TX_STAT_AMPDU)) 789 !(info->flags & IEEE80211_TX_STAT_AMPDU))
785 return; 790 return;
786 791
787 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
788 !lq_sta->ibss_sta_added)
789 return;
790
791 /* 792 /*
792 * Ignore this Tx frame response if its initial rate doesn't match 793 * Ignore this Tx frame response if its initial rate doesn't match
793 * that of latest Link Quality command. There may be stragglers 794 * that of latest Link Quality command. There may be stragglers
@@ -833,7 +834,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
833 lq_sta->missed_rate_counter++; 834 lq_sta->missed_rate_counter++;
834 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { 835 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
835 lq_sta->missed_rate_counter = 0; 836 lq_sta->missed_rate_counter = 0;
836 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 837 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
837 } 838 }
838 /* Regardless, ignore this status info for outdated rate */ 839 /* Regardless, ignore this status info for outdated rate */
839 return; 840 return;
@@ -1913,7 +1914,7 @@ static u32 rs_update_rate_tbl(struct iwl_priv *priv,
1913 /* Update uCode's rate table. */ 1914 /* Update uCode's rate table. */
1914 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green); 1915 rate = rate_n_flags_from_tbl(priv, tbl, index, is_green);
1915 rs_fill_link_cmd(priv, lq_sta, rate); 1916 rs_fill_link_cmd(priv, lq_sta, rate);
1916 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 1917 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
1917 1918
1918 return rate; 1919 return rate;
1919} 1920}
@@ -2002,7 +2003,7 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2002 /* rates available for this association, and for modulation mode */ 2003 /* rates available for this association, and for modulation mode */
2003 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type); 2004 rate_mask = rs_get_supported_rates(lq_sta, hdr, tbl->lq_type);
2004 2005
2005 IWL_DEBUG_RATE(priv, "mask 0x%04X \n", rate_mask); 2006 IWL_DEBUG_RATE(priv, "mask 0x%04X\n", rate_mask);
2006 2007
2007 /* mask with station rate restriction */ 2008 /* mask with station rate restriction */
2008 if (is_legacy(tbl->lq_type)) { 2009 if (is_legacy(tbl->lq_type)) {
@@ -2289,7 +2290,7 @@ lq_update:
2289 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n", 2290 IWL_DEBUG_RATE(priv, "Switch current mcs: %X index: %d\n",
2290 tbl->current_rate, index); 2291 tbl->current_rate, index);
2291 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate); 2292 rs_fill_link_cmd(priv, lq_sta, tbl->current_rate);
2292 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2293 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC, false);
2293 } else 2294 } else
2294 done_search = 1; 2295 done_search = 1;
2295 } 2296 }
@@ -2338,7 +2339,20 @@ out:
2338 return; 2339 return;
2339} 2340}
2340 2341
2341 2342/**
2343 * rs_initialize_lq - Initialize a station's hardware rate table
2344 *
2345 * The uCode's station table contains a table of fallback rates
2346 * for automatic fallback during transmission.
2347 *
2348 * NOTE: This sets up a default set of values. These will be replaced later
2349 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
2350 * rc80211_simple.
2351 *
2352 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
2353 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
2354 * which requires station table entry to exist).
2355 */
2342static void rs_initialize_lq(struct iwl_priv *priv, 2356static void rs_initialize_lq(struct iwl_priv *priv,
2343 struct ieee80211_conf *conf, 2357 struct ieee80211_conf *conf,
2344 struct ieee80211_sta *sta, 2358 struct ieee80211_sta *sta,
@@ -2357,10 +2371,6 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2357 2371
2358 i = lq_sta->last_txrate_idx; 2372 i = lq_sta->last_txrate_idx;
2359 2373
2360 if ((lq_sta->lq.sta_id == 0xff) &&
2361 (priv->iw_mode == NL80211_IFTYPE_ADHOC))
2362 goto out;
2363
2364 valid_tx_ant = priv->hw_params.valid_tx_ant; 2374 valid_tx_ant = priv->hw_params.valid_tx_ant;
2365 2375
2366 if (!lq_sta->search_better_tbl) 2376 if (!lq_sta->search_better_tbl)
@@ -2388,7 +2398,8 @@ static void rs_initialize_lq(struct iwl_priv *priv,
2388 tbl->current_rate = rate; 2398 tbl->current_rate = rate;
2389 rs_set_expected_tpt_table(lq_sta, tbl); 2399 rs_set_expected_tpt_table(lq_sta, tbl);
2390 rs_fill_link_cmd(NULL, lq_sta, rate); 2400 rs_fill_link_cmd(NULL, lq_sta, rate);
2391 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 2401 priv->stations[lq_sta->lq.sta_id].lq = &lq_sta->lq;
2402 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_SYNC, true);
2392 out: 2403 out:
2393 return; 2404 return;
2394} 2405}
@@ -2399,10 +2410,7 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2399 2410
2400 struct sk_buff *skb = txrc->skb; 2411 struct sk_buff *skb = txrc->skb;
2401 struct ieee80211_supported_band *sband = txrc->sband; 2412 struct ieee80211_supported_band *sband = txrc->sband;
2402 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 2413 struct iwl_priv *priv __maybe_unused = (struct iwl_priv *)priv_r;
2403 struct ieee80211_conf *conf = &priv->hw->conf;
2404 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2405 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
2406 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2414 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2407 struct iwl_lq_sta *lq_sta = priv_sta; 2415 struct iwl_lq_sta *lq_sta = priv_sta;
2408 int rate_idx; 2416 int rate_idx;
@@ -2420,30 +2428,18 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta,
2420 lq_sta->max_rate_idx = -1; 2428 lq_sta->max_rate_idx = -1;
2421 } 2429 }
2422 2430
2431 /* Treat uninitialized rate scaling data same as non-existing. */
2432 if (lq_sta && !lq_sta->drv) {
2433 IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n");
2434 priv_sta = NULL;
2435 }
2436
2423 /* Send management frames and NO_ACK data using lowest rate. */ 2437 /* Send management frames and NO_ACK data using lowest rate. */
2424 if (rate_control_send_low(sta, priv_sta, txrc)) 2438 if (rate_control_send_low(sta, priv_sta, txrc))
2425 return; 2439 return;
2426 2440
2427 rate_idx = lq_sta->last_txrate_idx; 2441 rate_idx = lq_sta->last_txrate_idx;
2428 2442
2429 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
2430 !lq_sta->ibss_sta_added) {
2431 u8 sta_id = iwl_find_station(priv, hdr->addr1);
2432
2433 if (sta_id == IWL_INVALID_STATION) {
2434 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n",
2435 hdr->addr1);
2436 sta_id = iwl_add_station(priv, hdr->addr1,
2437 false, CMD_ASYNC, ht_cap);
2438 }
2439 if ((sta_id != IWL_INVALID_STATION)) {
2440 lq_sta->lq.sta_id = sta_id;
2441 lq_sta->lq.rs_table[0].rate_n_flags = 0;
2442 lq_sta->ibss_sta_added = 1;
2443 rs_initialize_lq(priv, conf, sta, lq_sta);
2444 }
2445 }
2446
2447 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) { 2443 if (lq_sta->last_rate_n_flags & RATE_MCS_HT_MSK) {
2448 rate_idx -= IWL_FIRST_OFDM_RATE; 2444 rate_idx -= IWL_FIRST_OFDM_RATE;
2449 /* 6M and 9M shared same MCS index */ 2445 /* 6M and 9M shared same MCS index */
@@ -2493,16 +2489,25 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2493 return lq_sta; 2489 return lq_sta;
2494} 2490}
2495 2491
2496static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband, 2492/*
2497 struct ieee80211_sta *sta, void *priv_sta) 2493 * Called after adding a new station to initialize rate scaling
2494 */
2495void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_id)
2498{ 2496{
2499 int i, j; 2497 int i, j;
2500 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 2498 struct ieee80211_hw *hw = priv->hw;
2501 struct ieee80211_conf *conf = &priv->hw->conf; 2499 struct ieee80211_conf *conf = &priv->hw->conf;
2502 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2500 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2503 struct iwl_lq_sta *lq_sta = priv_sta; 2501 struct iwl_station_priv *sta_priv;
2502 struct iwl_lq_sta *lq_sta;
2503 struct ieee80211_supported_band *sband;
2504
2505 sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2506 lq_sta = &sta_priv->lq_sta;
2507 sband = hw->wiphy->bands[conf->channel->band];
2508
2504 2509
2505 lq_sta->lq.sta_id = 0xff; 2510 lq_sta->lq.sta_id = sta_id;
2506 2511
2507 for (j = 0; j < LQ_SIZE; j++) 2512 for (j = 0; j < LQ_SIZE; j++)
2508 for (i = 0; i < IWL_RATE_COUNT; i++) 2513 for (i = 0; i < IWL_RATE_COUNT; i++)
@@ -2514,39 +2519,18 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2514 for (i = 0; i < IWL_RATE_COUNT; i++) 2519 for (i = 0; i < IWL_RATE_COUNT; i++)
2515 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]); 2520 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2516 2521
2517 IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init ***\n"); 2522 IWL_DEBUG_RATE(priv, "LQ: *** rate scale station global init for station %d ***\n",
2523 sta_id);
2518 /* TODO: what is a good starting rate for STA? About middle? Maybe not 2524 /* TODO: what is a good starting rate for STA? About middle? Maybe not
2519 * the lowest or the highest rate.. Could consider using RSSI from 2525 * the lowest or the highest rate.. Could consider using RSSI from
2520 * previous packets? Need to have IEEE 802.1X auth succeed immediately 2526 * previous packets? Need to have IEEE 802.1X auth succeed immediately
2521 * after assoc.. */ 2527 * after assoc.. */
2522 2528
2523 lq_sta->ibss_sta_added = 0;
2524 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2525 u8 sta_id = iwl_find_station(priv,
2526 sta->addr);
2527
2528 /* for IBSS the call are from tasklet */
2529 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
2530
2531 if (sta_id == IWL_INVALID_STATION) {
2532 IWL_DEBUG_RATE(priv, "LQ: ADD station %pM\n", sta->addr);
2533 sta_id = iwl_add_station(priv, sta->addr, false,
2534 CMD_ASYNC, ht_cap);
2535 }
2536 if ((sta_id != IWL_INVALID_STATION)) {
2537 lq_sta->lq.sta_id = sta_id;
2538 lq_sta->lq.rs_table[0].rate_n_flags = 0;
2539 }
2540 /* FIXME: this is w/a remove it later */
2541 priv->assoc_station_added = 1;
2542 }
2543
2544 lq_sta->is_dup = 0; 2529 lq_sta->is_dup = 0;
2545 lq_sta->max_rate_idx = -1; 2530 lq_sta->max_rate_idx = -1;
2546 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX; 2531 lq_sta->missed_rate_counter = IWL_MISSED_RATE_MAX;
2547 lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config); 2532 lq_sta->is_green = rs_use_green(sta, &priv->current_ht_config);
2548 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000); 2533 lq_sta->active_legacy_rate = priv->active_rate & ~(0x1000);
2549 lq_sta->active_rate_basic = priv->active_rate_basic;
2550 lq_sta->band = priv->band; 2534 lq_sta->band = priv->band;
2551 /* 2535 /*
2552 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3), 2536 * active_siso_rate mask includes 9 MBits (bit 5), and CCK (bits 0-3),
@@ -2794,7 +2778,7 @@ static ssize_t rs_sta_dbgfs_scale_table_write(struct file *file,
2794 2778
2795 if (lq_sta->dbg_fixed_rate) { 2779 if (lq_sta->dbg_fixed_rate) {
2796 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate); 2780 rs_fill_link_cmd(NULL, lq_sta, lq_sta->dbg_fixed_rate);
2797 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC); 2781 iwl_send_lq_cmd(lq_sta->drv, &lq_sta->lq, CMD_ASYNC, false);
2798 } 2782 }
2799 2783
2800 return count; 2784 return count;
@@ -2950,12 +2934,6 @@ static ssize_t rs_sta_dbgfs_rate_scale_data_read(struct file *file,
2950 desc += sprintf(buff+desc, 2934 desc += sprintf(buff+desc,
2951 "Bit Rate= %d Mb/s\n", 2935 "Bit Rate= %d Mb/s\n",
2952 iwl_rates[lq_sta->last_txrate_idx].ieee >> 1); 2936 iwl_rates[lq_sta->last_txrate_idx].ieee >> 1);
2953 desc += sprintf(buff+desc,
2954 "Signal Level= %d dBm\tNoise Level= %d dBm\n",
2955 priv->last_rx_rssi, priv->last_rx_noise);
2956 desc += sprintf(buff+desc,
2957 "Tsf= 0x%llx\tBeacon time= 0x%08X\n",
2958 priv->last_tsf, priv->last_beacon_time);
2959 2937
2960 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc); 2938 ret = simple_read_from_buffer(user_buf, count, ppos, buff, desc);
2961 return ret; 2939 return ret;
@@ -2995,12 +2973,21 @@ static void rs_remove_debugfs(void *priv, void *priv_sta)
2995} 2973}
2996#endif 2974#endif
2997 2975
2976/*
2977 * Initialization of rate scaling information is done by driver after
2978 * the station is added. Since mac80211 calls this function before a
2979 * station is added we ignore it.
2980 */
2981static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sband,
2982 struct ieee80211_sta *sta, void *priv_sta)
2983{
2984}
2998static struct rate_control_ops rs_ops = { 2985static struct rate_control_ops rs_ops = {
2999 .module = NULL, 2986 .module = NULL,
3000 .name = RS_NAME, 2987 .name = RS_NAME,
3001 .tx_status = rs_tx_status, 2988 .tx_status = rs_tx_status,
3002 .get_rate = rs_get_rate, 2989 .get_rate = rs_get_rate,
3003 .rate_init = rs_rate_init, 2990 .rate_init = rs_rate_init_stub,
3004 .alloc = rs_alloc, 2991 .alloc = rs_alloc,
3005 .free = rs_free, 2992 .free = rs_free,
3006 .alloc_sta = rs_alloc_sta, 2993 .alloc_sta = rs_alloc_sta,
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index e71923961e69..8292f6d48ec6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -403,7 +403,6 @@ struct iwl_lq_sta {
403 u8 is_green; 403 u8 is_green;
404 u8 is_dup; 404 u8 is_dup;
405 enum ieee80211_band band; 405 enum ieee80211_band band;
406 u8 ibss_sta_added;
407 406
408 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */ 407 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
409 u32 supp_rates; 408 u32 supp_rates;
@@ -411,7 +410,6 @@ struct iwl_lq_sta {
411 u16 active_siso_rate; 410 u16 active_siso_rate;
412 u16 active_mimo2_rate; 411 u16 active_mimo2_rate;
413 u16 active_mimo3_rate; 412 u16 active_mimo3_rate;
414 u16 active_rate_basic;
415 s8 max_rate_idx; /* Max rate set by user */ 413 s8 max_rate_idx; /* Max rate set by user */
416 u8 missed_rate_counter; 414 u8 missed_rate_counter;
417 415
@@ -479,6 +477,12 @@ static inline u8 iwl3945_get_prev_ieee_rate(u8 rate_index)
479 */ 477 */
480extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id); 478extern void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id);
481 479
480/* Initialize station's rate scaling information after adding station */
481extern void iwl_rs_rate_init(struct iwl_priv *priv,
482 struct ieee80211_sta *sta, u8 sta_id);
483extern void iwl3945_rs_rate_init(struct iwl_priv *priv,
484 struct ieee80211_sta *sta, u8 sta_id);
485
482/** 486/**
483 * iwl_rate_control_register - Register the rate control algorithm callbacks 487 * iwl_rate_control_register - Register the rate control algorithm callbacks
484 * 488 *
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
new file mode 100644
index 000000000000..3077eac58880
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -0,0 +1,1333 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-sta.h"
38#include "iwl-io.h"
39#include "iwl-helpers.h"
40#include "iwl-agn-hw.h"
41#include "iwl-agn.h"
42
43/*
44 * mac80211 queues, ACs, hardware queues, FIFOs.
45 *
46 * Cf. http://wireless.kernel.org/en/developers/Documentation/mac80211/queues
47 *
 48 * Mac80211 uses the following numbers, which we get from it
49 * by way of skb_get_queue_mapping(skb):
50 *
51 * VO 0
52 * VI 1
53 * BE 2
54 * BK 3
55 *
56 *
57 * Regular (not A-MPDU) frames are put into hardware queues corresponding
58 * to the FIFOs, see comments in iwl-prph.h. Aggregated frames get their
59 * own queue per aggregation session (RA/TID combination), such queues are
60 * set up to map into FIFOs too, for which we need an AC->FIFO mapping. In
61 * order to map frames to the right queue, we also need an AC->hw queue
62 * mapping. This is implemented here.
63 *
64 * Due to the way hw queues are set up (by the hw specific modules like
65 * iwl-4965.c, iwl-5000.c etc.), the AC->hw queue mapping is the identity
66 * mapping.
67 */
68
69static const u8 tid_to_ac[] = {
70 /* this matches the mac80211 numbers */
71 2, 3, 3, 2, 1, 1, 0, 0
72};
73
74static const u8 ac_to_fifo[] = {
75 IWL_TX_FIFO_VO,
76 IWL_TX_FIFO_VI,
77 IWL_TX_FIFO_BE,
78 IWL_TX_FIFO_BK,
79};
80
81static inline int get_fifo_from_ac(u8 ac)
82{
83 return ac_to_fifo[ac];
84}
85
86static inline int get_fifo_from_tid(u16 tid)
87{
88 if (likely(tid < ARRAY_SIZE(tid_to_ac)))
89 return get_fifo_from_ac(tid_to_ac[tid]);
90
91 /* no support for TIDs 8-15 yet */
92 return -EINVAL;
93}
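
/*
 * A small stand-alone example of the TID -> AC mapping above (the table is
 * copied here so the sketch compiles on its own).  TIDs 6 and 7 are voice
 * (AC 0 / IWL_TX_FIFO_VO), TIDs 0 and 3 are best effort (AC 2), and so on.
 */
#include <stdio.h>

static const unsigned char sketch_tid_to_ac[] = { 2, 3, 3, 2, 1, 1, 0, 0 };

int main(void)
{
	unsigned int tid;

	for (tid = 0; tid < 8; tid++)
		printf("TID %u -> AC %u\n", tid,
		       (unsigned int)sketch_tid_to_ac[tid]);
	return 0;
}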
94
95/**
96 * iwlagn_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
97 */
98void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
99 struct iwl_tx_queue *txq,
100 u16 byte_cnt)
101{
102 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
103 int write_ptr = txq->q.write_ptr;
104 int txq_id = txq->q.id;
105 u8 sec_ctl = 0;
106 u8 sta_id = 0;
107 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
108 __le16 bc_ent;
109
110 WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX);
111
112 if (txq_id != IWL_CMD_QUEUE_NUM) {
113 sta_id = txq->cmd[txq->q.write_ptr]->cmd.tx.sta_id;
114 sec_ctl = txq->cmd[txq->q.write_ptr]->cmd.tx.sec_ctl;
115
116 switch (sec_ctl & TX_CMD_SEC_MSK) {
117 case TX_CMD_SEC_CCM:
118 len += CCMP_MIC_LEN;
119 break;
120 case TX_CMD_SEC_TKIP:
121 len += TKIP_ICV_LEN;
122 break;
123 case TX_CMD_SEC_WEP:
124 len += WEP_IV_LEN + WEP_ICV_LEN;
125 break;
126 }
127 }
128
129 bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
130
131 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
132
133 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
134 scd_bc_tbl[txq_id].
135 tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
136}
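
/*
 * Sketch of the byte-count table entry packed above (endianness handling
 * omitted): a 16-bit entry carries a 12-bit length with the station id in
 * the top four bits.  The driver also mirrors the first
 * TFD_QUEUE_SIZE_BC_DUP entries past TFD_QUEUE_SIZE_MAX so the hardware
 * can read a window of entries without wrapping.
 */
#include <assert.h>
#include <stdint.h>

static uint16_t pack_bc_ent(uint16_t len, uint8_t sta_id)
{
	assert(len <= 0xFFF); /* length field is 12 bits wide */
	return (uint16_t)((len & 0xFFF) | ((uint16_t)sta_id << 12));
}

int main(void)
{
	/* a 100-byte entry for station 3 packs to 0x3064 */
	assert(pack_bc_ent(100, 3) == 0x3064);
	return 0;
}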
137
138void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
139 struct iwl_tx_queue *txq)
140{
141 struct iwlagn_scd_bc_tbl *scd_bc_tbl = priv->scd_bc_tbls.addr;
142 int txq_id = txq->q.id;
143 int read_ptr = txq->q.read_ptr;
144 u8 sta_id = 0;
145 __le16 bc_ent;
146
147 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
148
149 if (txq_id != IWL_CMD_QUEUE_NUM)
150 sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
151
152 bc_ent = cpu_to_le16(1 | (sta_id << 12));
153 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
154
155 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
156 scd_bc_tbl[txq_id].
157 tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
158}
159
160static int iwlagn_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
161 u16 txq_id)
162{
163 u32 tbl_dw_addr;
164 u32 tbl_dw;
165 u16 scd_q2ratid;
166
167 scd_q2ratid = ra_tid & IWL_SCD_QUEUE_RA_TID_MAP_RATID_MSK;
168
169 tbl_dw_addr = priv->scd_base_addr +
170 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(txq_id);
171
172 tbl_dw = iwl_read_targ_mem(priv, tbl_dw_addr);
173
174 if (txq_id & 0x1)
175 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
176 else
177 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
178
179 iwl_write_targ_mem(priv, tbl_dw_addr, tbl_dw);
180
181 return 0;
182}
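
/*
 * Stand-alone mirror of the half-word update above: each 32-bit word of
 * the translation table holds the RA/TID entries for two queues, so an odd
 * queue id writes the high half-word and an even one the low half-word.
 * The SRAM word is modeled here as a plain variable.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t set_q2ratid(uint32_t tbl_dw, uint16_t txq_id, uint16_t ra_tid)
{
	if (txq_id & 0x1)
		return ((uint32_t)ra_tid << 16) | (tbl_dw & 0x0000FFFF);
	return ra_tid | (tbl_dw & 0xFFFF0000);
}

int main(void)
{
	uint32_t word = 0;

	word = set_q2ratid(word, 10, 0x1234); /* even queue: low half */
	word = set_q2ratid(word, 11, 0x5678); /* odd queue: high half */
	printf("0x%08x\n", (unsigned int)word); /* prints 0x56781234 */
	return 0;
}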
183
184static void iwlagn_tx_queue_stop_scheduler(struct iwl_priv *priv, u16 txq_id)
185{
186 /* Simply stop the queue, but don't change any configuration;
187 * the SCD_ACT_EN bit is the write-enable mask for the ACTIVE bit. */
188 iwl_write_prph(priv,
189 IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
190 (0 << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE)|
191 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
192}
193
194void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
195 int txq_id, u32 index)
196{
197 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
198 (index & 0xff) | (txq_id << 8));
199 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(txq_id), index);
200}
201
202void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
203 struct iwl_tx_queue *txq,
204 int tx_fifo_id, int scd_retry)
205{
206 int txq_id = txq->q.id;
207 int active = test_bit(txq_id, &priv->txq_ctx_active_msk) ? 1 : 0;
208
209 iwl_write_prph(priv, IWL50_SCD_QUEUE_STATUS_BITS(txq_id),
210 (active << IWL50_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
211 (tx_fifo_id << IWL50_SCD_QUEUE_STTS_REG_POS_TXF) |
212 (1 << IWL50_SCD_QUEUE_STTS_REG_POS_WSL) |
213 IWL50_SCD_QUEUE_STTS_REG_MSK);
214
215 txq->sched_retry = scd_retry;
216
217 IWL_DEBUG_INFO(priv, "%s %s Queue %d on FIFO %d\n",
218 active ? "Activate" : "Deactivate",
219 scd_retry ? "BA" : "AC/CMD", txq_id, tx_fifo_id);
220}
221
222int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
223 int tx_fifo, int sta_id, int tid, u16 ssn_idx)
224{
225 unsigned long flags;
226 u16 ra_tid;
227
228 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
229 (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
230 <= txq_id)) {
231 IWL_WARN(priv,
232 "queue number out of range: %d, must be %d to %d\n",
233 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
234 IWLAGN_FIRST_AMPDU_QUEUE +
235 priv->cfg->num_of_ampdu_queues - 1);
236 return -EINVAL;
237 }
238
239 ra_tid = BUILD_RAxTID(sta_id, tid);
240
241 /* Modify device's station table to Tx this TID */
242 iwl_sta_tx_modify_enable_tid(priv, sta_id, tid);
243
244 spin_lock_irqsave(&priv->lock, flags);
245
246 /* Stop this Tx queue before configuring it */
247 iwlagn_tx_queue_stop_scheduler(priv, txq_id);
248
249 /* Map receiver-address / traffic-ID to this queue */
250 iwlagn_tx_queue_set_q2ratid(priv, ra_tid, txq_id);
251
252 /* Set this queue as a chain-building queue */
253 iwl_set_bits_prph(priv, IWL50_SCD_QUEUECHAIN_SEL, (1<<txq_id));
254
255 /* enable aggregations for the queue */
256 iwl_set_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1<<txq_id));
257
258 /* Place first TFD at index corresponding to start sequence number.
259 * Assumes that ssn_idx is valid (!= 0xFFF) */
260 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
261 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
262 iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
263
264 /* Set up Tx window size and frame limit for this queue */
265 iwl_write_targ_mem(priv, priv->scd_base_addr +
266 IWL50_SCD_CONTEXT_QUEUE_OFFSET(txq_id) +
267 sizeof(u32),
268 ((SCD_WIN_SIZE <<
269 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
270 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
271 ((SCD_FRAME_LIMIT <<
272 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
273 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
274
275 iwl_set_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
276
277 /* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
278 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 1);
279
280 spin_unlock_irqrestore(&priv->lock, flags);
281
282 return 0;
283}
284
285int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
286 u16 ssn_idx, u8 tx_fifo)
287{
288 if ((IWLAGN_FIRST_AMPDU_QUEUE > txq_id) ||
289 (IWLAGN_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
290 <= txq_id)) {
291 IWL_ERR(priv,
292 "queue number out of range: %d, must be %d to %d\n",
293 txq_id, IWLAGN_FIRST_AMPDU_QUEUE,
294 IWLAGN_FIRST_AMPDU_QUEUE +
295 priv->cfg->num_of_ampdu_queues - 1);
296 return -EINVAL;
297 }
298
299 iwlagn_tx_queue_stop_scheduler(priv, txq_id);
300
301 iwl_clear_bits_prph(priv, IWL50_SCD_AGGR_SEL, (1 << txq_id));
302
303 priv->txq[txq_id].q.read_ptr = (ssn_idx & 0xff);
304 priv->txq[txq_id].q.write_ptr = (ssn_idx & 0xff);
305 /* supposes that ssn_idx is valid (!= 0xFFF) */
306 iwlagn_set_wr_ptrs(priv, txq_id, ssn_idx);
307
308 iwl_clear_bits_prph(priv, IWL50_SCD_INTERRUPT_MASK, (1 << txq_id));
309 iwl_txq_ctx_deactivate(priv, txq_id);
310 iwlagn_tx_queue_set_status(priv, &priv->txq[txq_id], tx_fifo, 0);
311
312 return 0;
313}
314
315/*
316 * Activate/Deactivate Tx DMA/FIFO channels according to the Tx FIFO mask.
317 * Must be called under priv->lock and with MAC access enabled.
318 */
319void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask)
320{
321 iwl_write_prph(priv, IWL50_SCD_TXFACT, mask);
322}
323
324static inline int get_queue_from_ac(u16 ac)
325{
326 return ac;
327}
328
329/*
330 * Build the basic part of the REPLY_TX host command.
331 */
332static void iwlagn_tx_cmd_build_basic(struct iwl_priv *priv,
333 struct iwl_tx_cmd *tx_cmd,
334 struct ieee80211_tx_info *info,
335 struct ieee80211_hdr *hdr,
336 u8 std_id)
337{
338 __le16 fc = hdr->frame_control;
339 __le32 tx_flags = tx_cmd->tx_flags;
340
341 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
342 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
343 tx_flags |= TX_CMD_FLG_ACK_MSK;
344 if (ieee80211_is_mgmt(fc))
345 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
346 if (ieee80211_is_probe_resp(fc) &&
347 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
348 tx_flags |= TX_CMD_FLG_TSF_MSK;
349 } else {
350 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
351 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
352 }
353
354 if (ieee80211_is_back_req(fc))
355 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
356
357
358 tx_cmd->sta_id = std_id;
359 if (ieee80211_has_morefrags(fc))
360 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
361
362 if (ieee80211_is_data_qos(fc)) {
363 u8 *qc = ieee80211_get_qos_ctl(hdr);
364 tx_cmd->tid_tspec = qc[0] & 0xf;
365 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
366 } else {
367 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
368 }
369
370 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
371
372 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
373 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
374
375 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
376 if (ieee80211_is_mgmt(fc)) {
377 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
378 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
379 else
380 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
381 } else {
382 tx_cmd->timeout.pm_frame_timeout = 0;
383 }
384
385 tx_cmd->driver_txop = 0;
386 tx_cmd->tx_flags = tx_flags;
387 tx_cmd->next_frame_len = 0;
388}
389
390#define RTS_DEFAULT_RETRY_LIMIT 60
391
392static void iwlagn_tx_cmd_build_rate(struct iwl_priv *priv,
393 struct iwl_tx_cmd *tx_cmd,
394 struct ieee80211_tx_info *info,
395 __le16 fc)
396{
397 u32 rate_flags;
398 int rate_idx;
399 u8 rts_retry_limit;
400 u8 data_retry_limit;
401 u8 rate_plcp;
402
 403	/* Set retry limit on DATA packets and Probe Responses */
404 if (ieee80211_is_probe_resp(fc))
405 data_retry_limit = 3;
406 else
407 data_retry_limit = IWLAGN_DEFAULT_TX_RETRY;
408 tx_cmd->data_retry_limit = data_retry_limit;
409
410 /* Set retry limit on RTS packets */
 411	rts_retry_limit = RTS_DEFAULT_RETRY_LIMIT;
412 if (data_retry_limit < rts_retry_limit)
413 rts_retry_limit = data_retry_limit;
414 tx_cmd->rts_retry_limit = rts_retry_limit;
415
416 /* DATA packets will use the uCode station table for rate/antenna
417 * selection */
418 if (ieee80211_is_data(fc)) {
419 tx_cmd->initial_rate_index = 0;
420 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
421 return;
422 }
423
 424	/*
425 * If the current TX rate stored in mac80211 has the MCS bit set, it's
426 * not really a TX rate. Thus, we use the lowest supported rate for
427 * this band. Also use the lowest supported rate if the stored rate
428 * index is invalid.
429 */
430 rate_idx = info->control.rates[0].idx;
431 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
432 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
433 rate_idx = rate_lowest_index(&priv->bands[info->band],
434 info->control.sta);
435 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
436 if (info->band == IEEE80211_BAND_5GHZ)
437 rate_idx += IWL_FIRST_OFDM_RATE;
438 /* Get PLCP rate for tx_cmd->rate_n_flags */
439 rate_plcp = iwl_rates[rate_idx].plcp;
440 /* Zero out flags for this packet */
441 rate_flags = 0;
442
443 /* Set CCK flag as needed */
444 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
445 rate_flags |= RATE_MCS_CCK_MSK;
446
447 /* Set up RTS and CTS flags for certain packets */
448 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
449 case cpu_to_le16(IEEE80211_STYPE_AUTH):
450 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
451 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
452 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
453 if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
454 tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
455 tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
456 }
457 break;
458 default:
459 break;
460 }
461
462 /* Set up antennas */
463 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
464 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
465
466 /* Set the rate in the TX cmd */
467 tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
468}
469
470static void iwlagn_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
471 struct ieee80211_tx_info *info,
472 struct iwl_tx_cmd *tx_cmd,
473 struct sk_buff *skb_frag,
474 int sta_id)
475{
476 struct ieee80211_key_conf *keyconf = info->control.hw_key;
477
478 switch (keyconf->alg) {
479 case ALG_CCMP:
480 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
481 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
482 if (info->flags & IEEE80211_TX_CTL_AMPDU)
483 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
484 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
485 break;
486
487 case ALG_TKIP:
488 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
489 ieee80211_get_tkip_key(keyconf, skb_frag,
490 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
491 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
492 break;
493
494 case ALG_WEP:
495 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
496 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
497
498 if (keyconf->keylen == WEP_KEY_LEN_128)
499 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
500
501 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
502
503 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
504 "with key %d\n", keyconf->keyidx);
505 break;
506
507 default:
508 IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
509 break;
510 }
511}
512
513/*
514 * start REPLY_TX command process
515 */
516int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
517{
518 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
519 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
520 struct ieee80211_sta *sta = info->control.sta;
521 struct iwl_station_priv *sta_priv = NULL;
522 struct iwl_tx_queue *txq;
523 struct iwl_queue *q;
524 struct iwl_device_cmd *out_cmd;
525 struct iwl_cmd_meta *out_meta;
526 struct iwl_tx_cmd *tx_cmd;
527 int swq_id, txq_id;
528 dma_addr_t phys_addr;
529 dma_addr_t txcmd_phys;
530 dma_addr_t scratch_phys;
531 u16 len, len_org, firstlen, secondlen;
532 u16 seq_number = 0;
533 __le16 fc;
534 u8 hdr_len;
535 u8 sta_id;
536 u8 wait_write_ptr = 0;
537 u8 tid = 0;
538 u8 *qc = NULL;
539 unsigned long flags;
540
541 spin_lock_irqsave(&priv->lock, flags);
542 if (iwl_is_rfkill(priv)) {
543 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
544 goto drop_unlock;
545 }
546
547 fc = hdr->frame_control;
548
549#ifdef CONFIG_IWLWIFI_DEBUG
550 if (ieee80211_is_auth(fc))
551 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
552 else if (ieee80211_is_assoc_req(fc))
553 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
554 else if (ieee80211_is_reassoc_req(fc))
555 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
556#endif
557
558 hdr_len = ieee80211_hdrlen(fc);
559
560 /* Find (or create) index into station table for destination station */
561 if (info->flags & IEEE80211_TX_CTL_INJECTED)
562 sta_id = priv->hw_params.bcast_sta_id;
563 else
564 sta_id = iwl_get_sta_id(priv, hdr);
565 if (sta_id == IWL_INVALID_STATION) {
566 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
567 hdr->addr1);
568 goto drop_unlock;
569 }
570
571 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
572
573 if (sta)
574 sta_priv = (void *)sta->drv_priv;
575
576 if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
577 sta_priv->asleep) {
578 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
579 /*
580 * This sends an asynchronous command to the device,
581 * but we can rely on it being processed before the
582 * next frame is processed -- and the next frame to
583 * this station is the one that will consume this
584 * counter.
585 * For now set the counter to just 1 since we do not
586 * support uAPSD yet.
587 */
588 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
589 }
590
591 txq_id = get_queue_from_ac(skb_get_queue_mapping(skb));
592 if (ieee80211_is_data_qos(fc)) {
593 qc = ieee80211_get_qos_ctl(hdr);
594 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
595 if (unlikely(tid >= MAX_TID_COUNT))
596 goto drop_unlock;
597 seq_number = priv->stations[sta_id].tid[tid].seq_number;
598 seq_number &= IEEE80211_SCTL_SEQ;
599 hdr->seq_ctrl = hdr->seq_ctrl &
600 cpu_to_le16(IEEE80211_SCTL_FRAG);
601 hdr->seq_ctrl |= cpu_to_le16(seq_number);
602 seq_number += 0x10;
603 /* aggregation is on for this <sta,tid> */
604 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
605 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
606 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
607 }
608 }
609
610 txq = &priv->txq[txq_id];
611 swq_id = txq->swq_id;
612 q = &txq->q;
613
614 if (unlikely(iwl_queue_space(q) < q->high_mark))
615 goto drop_unlock;
616
617 if (ieee80211_is_data_qos(fc))
618 priv->stations[sta_id].tid[tid].tfds_in_queue++;
619
620 /* Set up driver data for this TFD */
621 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
622 txq->txb[q->write_ptr].skb[0] = skb;
623
624 /* Set up first empty entry in queue's array of Tx/cmd buffers */
625 out_cmd = txq->cmd[q->write_ptr];
626 out_meta = &txq->meta[q->write_ptr];
627 tx_cmd = &out_cmd->cmd.tx;
628 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
629 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
630
631 /*
632 * Set up the Tx-command (not MAC!) header.
633 * Store the chosen Tx queue and TFD index within the sequence field;
634 * after Tx, uCode's Tx response will return this value so driver can
635 * locate the frame within the tx queue and do post-tx processing.
636 */
637 out_cmd->hdr.cmd = REPLY_TX;
638 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
639 INDEX_TO_SEQ(q->write_ptr)));
640
641 /* Copy MAC header from skb into command buffer */
642 memcpy(tx_cmd->hdr, hdr, hdr_len);
643
644
645 /* Total # bytes to be transmitted */
646 len = (u16)skb->len;
647 tx_cmd->len = cpu_to_le16(len);
648
649 if (info->control.hw_key)
650 iwlagn_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
651
652 /* TODO need this for burst mode later on */
653 iwlagn_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
654 iwl_dbg_log_tx_data_frame(priv, len, hdr);
655
656 iwlagn_tx_cmd_build_rate(priv, tx_cmd, info, fc);
657
658 iwl_update_stats(priv, true, fc, len);
659 /*
660 * Use the first empty entry in this queue's command buffer array
661 * to contain the Tx command and MAC header concatenated together
662 * (payload data will be in another buffer).
663 * Size of this varies, due to varying MAC header length.
664 * If end is not dword aligned, we'll have 2 extra bytes at the end
665 * of the MAC header (device reads on dword boundaries).
666 * We'll tell device about this padding later.
667 */
668 len = sizeof(struct iwl_tx_cmd) +
669 sizeof(struct iwl_cmd_header) + hdr_len;
670
671 len_org = len;
672 firstlen = len = (len + 3) & ~3;
673
674 if (len_org != len)
675 len_org = 1;
676 else
677 len_org = 0;
678
679 /* Tell NIC about any 2-byte padding after MAC header */
680 if (len_org)
681 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
682
683 /* Physical address of this Tx command's header (not MAC header!),
684 * within command buffer array. */
685 txcmd_phys = pci_map_single(priv->pci_dev,
686 &out_cmd->hdr, len,
687 PCI_DMA_BIDIRECTIONAL);
688 pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
689 pci_unmap_len_set(out_meta, len, len);
690 /* Add buffer containing Tx command and MAC(!) header to TFD's
691 * first entry */
692 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
693 txcmd_phys, len, 1, 0);
694
695 if (!ieee80211_has_morefrags(hdr->frame_control)) {
696 txq->need_update = 1;
697 if (qc)
698 priv->stations[sta_id].tid[tid].seq_number = seq_number;
699 } else {
700 wait_write_ptr = 1;
701 txq->need_update = 0;
702 }
703
704 /* Set up TFD's 2nd entry to point directly to remainder of skb,
705 * if any (802.11 null frames have no payload). */
706 secondlen = len = skb->len - hdr_len;
707 if (len) {
708 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
709 len, PCI_DMA_TODEVICE);
710 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
711 phys_addr, len,
712 0, 0);
713 }
714
715 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
716 offsetof(struct iwl_tx_cmd, scratch);
717
718 len = sizeof(struct iwl_tx_cmd) +
719 sizeof(struct iwl_cmd_header) + hdr_len;
720 /* take back ownership of DMA buffer to enable update */
721 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
722 len, PCI_DMA_BIDIRECTIONAL);
723 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
724 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
725
726 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
727 le16_to_cpu(out_cmd->hdr.sequence));
728 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
729 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
730 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
731
732 /* Set up entry for this TFD in Tx byte-count array */
733 if (info->flags & IEEE80211_TX_CTL_AMPDU)
734 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
735 le16_to_cpu(tx_cmd->len));
736
737 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
738 len, PCI_DMA_BIDIRECTIONAL);
739
740 trace_iwlwifi_dev_tx(priv,
741 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
742 sizeof(struct iwl_tfd),
743 &out_cmd->hdr, firstlen,
744 skb->data + hdr_len, secondlen);
745
746 /* Tell device the write index *just past* this latest filled TFD */
747 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
748 iwl_txq_update_write_ptr(priv, txq);
749 spin_unlock_irqrestore(&priv->lock, flags);
750
751 /*
752 * At this point the frame is "transmitted" successfully
753 * and we will get a TX status notification eventually,
754 * regardless of what happens next. All that remains is to
755 * decide whether or not we should update the write pointer.
756 */
757
758 /* avoid atomic ops if it isn't an associated client */
759 if (sta_priv && sta_priv->client)
760 atomic_inc(&sta_priv->pending_frames);
761
762 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
763 if (wait_write_ptr) {
764 spin_lock_irqsave(&priv->lock, flags);
765 txq->need_update = 1;
766 iwl_txq_update_write_ptr(priv, txq);
767 spin_unlock_irqrestore(&priv->lock, flags);
768 } else {
769 iwl_stop_queue(priv, txq->swq_id);
770 }
771 }
772
773 return 0;
774
775drop_unlock:
776 spin_unlock_irqrestore(&priv->lock, flags);
777 return -1;
778}
779
780static inline int iwlagn_alloc_dma_ptr(struct iwl_priv *priv,
781 struct iwl_dma_ptr *ptr, size_t size)
782{
783 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
784 GFP_KERNEL);
785 if (!ptr->addr)
786 return -ENOMEM;
787 ptr->size = size;
788 return 0;
789}
790
791static inline void iwlagn_free_dma_ptr(struct iwl_priv *priv,
792 struct iwl_dma_ptr *ptr)
793{
794 if (unlikely(!ptr->addr))
795 return;
796
797 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
798 memset(ptr, 0, sizeof(*ptr));
799}
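/*
 * Illustrative pairing (editor's sketch, not part of the original file):
 * how the two DMA helpers above are meant to be used together.  The field
 * names follow struct iwl_dma_ptr as used in this file.
 */
#if 0	/* example only */
static int example_dma_ptr_usage(struct iwl_priv *priv)
{
	struct iwl_dma_ptr tbl;
	int ret;

	ret = iwlagn_alloc_dma_ptr(priv, &tbl, 1024);
	if (ret)
		return ret;		/* -ENOMEM */

	/* ... tbl.addr is the CPU pointer, tbl.dma the bus address ... */

	iwlagn_free_dma_ptr(priv, &tbl);	/* safe even if addr is NULL */
	return 0;
}
#endif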
800
801/**
802 * iwlagn_hw_txq_ctx_free - Free TXQ Context
803 *
804 * Destroy all TX DMA queues and structures
805 */
806void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv)
807{
808 int txq_id;
809
810 /* Tx queues */
811 if (priv->txq) {
812 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
813 if (txq_id == IWL_CMD_QUEUE_NUM)
814 iwl_cmd_queue_free(priv);
815 else
816 iwl_tx_queue_free(priv, txq_id);
817 }
818 iwlagn_free_dma_ptr(priv, &priv->kw);
819
820 iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
821
822 /* free tx queue structure */
823 iwl_free_txq_mem(priv);
824}
825
826/**
827 * iwlagn_txq_ctx_alloc - allocate TX queue context
828 * Allocate all Tx DMA structures and initialize them
829 *
830 * @param priv
831 * @return error code
832 */
833int iwlagn_txq_ctx_alloc(struct iwl_priv *priv)
834{
835 int ret;
836 int txq_id, slots_num;
837 unsigned long flags;
838
839 /* Free all tx/cmd queues and keep-warm buffer */
840 iwlagn_hw_txq_ctx_free(priv);
841
842 ret = iwlagn_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
843 priv->hw_params.scd_bc_tbls_size);
844 if (ret) {
845 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
846 goto error_bc_tbls;
847 }
848 /* Alloc keep-warm buffer */
849 ret = iwlagn_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
850 if (ret) {
851 IWL_ERR(priv, "Keep Warm allocation failed\n");
852 goto error_kw;
853 }
854
855 /* allocate tx queue structure */
856 ret = iwl_alloc_txq_mem(priv);
857 if (ret)
858 goto error;
859
860 spin_lock_irqsave(&priv->lock, flags);
861
862 /* Turn off all Tx DMA fifos */
863 priv->cfg->ops->lib->txq_set_sched(priv, 0);
864
865 /* Tell NIC where to find the "keep warm" buffer */
866 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
867
868 spin_unlock_irqrestore(&priv->lock, flags);
869
870 /* Alloc and init all Tx queues, including the command queue (#4) */
871 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
872 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
873 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
874 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
875 txq_id);
876 if (ret) {
877 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
878 goto error;
879 }
880 }
881
882 return ret;
883
884 error:
885 iwlagn_hw_txq_ctx_free(priv);
886 iwlagn_free_dma_ptr(priv, &priv->kw);
887 error_kw:
888 iwlagn_free_dma_ptr(priv, &priv->scd_bc_tbls);
889 error_bc_tbls:
890 return ret;
891}
892
893void iwlagn_txq_ctx_reset(struct iwl_priv *priv)
894{
895 int txq_id, slots_num;
896 unsigned long flags;
897
898 spin_lock_irqsave(&priv->lock, flags);
899
900 /* Turn off all Tx DMA fifos */
901 priv->cfg->ops->lib->txq_set_sched(priv, 0);
902
903 /* Tell NIC where to find the "keep warm" buffer */
904 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
905
906 spin_unlock_irqrestore(&priv->lock, flags);
907
908 /* Alloc and init all Tx queues, including the command queue (#4) */
909 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
910 slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
911 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
912 iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
913 }
914}
915
916/**
917 * iwlagn_txq_ctx_stop - Stop all Tx DMA channels
918 */
919void iwlagn_txq_ctx_stop(struct iwl_priv *priv)
920{
921 int ch;
922 unsigned long flags;
923
924 /* Turn off all Tx DMA fifos */
925 spin_lock_irqsave(&priv->lock, flags);
926
927 priv->cfg->ops->lib->txq_set_sched(priv, 0);
928
929 /* Stop each Tx DMA channel, and wait for it to be idle */
930 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
931 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
932 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
933 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
934 1000);
935 }
936 spin_unlock_irqrestore(&priv->lock, flags);
937}
938
939/*
940 * Find first available (lowest unused) Tx Queue, mark it "active".
941 * Called only when finding queue for aggregation.
942 * Should never return anything < 7, because queues 0-6 should already
943 * be in use as EDCA AC (0-3), Command (4), and reserved (5, 6).
944 */
945static int iwlagn_txq_ctx_activate_free(struct iwl_priv *priv)
946{
947 int txq_id;
948
949 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
950 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
951 return txq_id;
952 return -1;
953}
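/*
 * Editor's note (illustrative): txq_ctx_active_msk is a bitmask of busy
 * queues, so with queues 0-6 already active the loop above claims the
 * next free bit, e.g.
 *
 *	mask 0x007f -> returns 7 (and the mask becomes 0x00ff)
 *
 * test_and_set_bit() makes the claim atomic, so two concurrent callers
 * can never be handed the same queue.
 */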
954
955int iwlagn_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
956{
957 int sta_id;
958 int tx_fifo;
959 int txq_id;
960 int ret;
961 unsigned long flags;
962 struct iwl_tid_data *tid_data;
963
964 tx_fifo = get_fifo_from_tid(tid);
965 if (unlikely(tx_fifo < 0))
966 return tx_fifo;
967
968 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
969 __func__, ra, tid);
970
971 sta_id = iwl_find_station(priv, ra);
972 if (sta_id == IWL_INVALID_STATION) {
973 IWL_ERR(priv, "Start AGG on invalid station\n");
974 return -ENXIO;
975 }
976 if (unlikely(tid >= MAX_TID_COUNT))
977 return -EINVAL;
978
979 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
980 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
981 return -ENXIO;
982 }
983
984 txq_id = iwlagn_txq_ctx_activate_free(priv);
985 if (txq_id == -1) {
986 IWL_ERR(priv, "No free aggregation queue available\n");
987 return -ENXIO;
988 }
989
990 spin_lock_irqsave(&priv->sta_lock, flags);
991 tid_data = &priv->stations[sta_id].tid[tid];
992 *ssn = SEQ_TO_SN(tid_data->seq_number);
993 tid_data->agg.txq_id = txq_id;
994 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
995 spin_unlock_irqrestore(&priv->sta_lock, flags);
996
997 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
998 sta_id, tid, *ssn);
999 if (ret)
1000 return ret;
1001
1002 if (tid_data->tfds_in_queue == 0) {
1003 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1004 tid_data->agg.state = IWL_AGG_ON;
1005 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1006 } else {
1007 IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
1008 tid_data->tfds_in_queue);
1009 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1010 }
1011 return ret;
1012}
1013
1014int iwlagn_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1015{
1016 int tx_fifo_id, txq_id, sta_id, ssn = -1;
1017 struct iwl_tid_data *tid_data;
1018 int write_ptr, read_ptr;
1019 unsigned long flags;
1020
1021 if (!ra) {
1022 IWL_ERR(priv, "ra = NULL\n");
1023 return -EINVAL;
1024 }
1025
1026 tx_fifo_id = get_fifo_from_tid(tid);
1027 if (unlikely(tx_fifo_id < 0))
1028 return tx_fifo_id;
1029
1030 sta_id = iwl_find_station(priv, ra);
1031
1032 if (sta_id == IWL_INVALID_STATION) {
1033 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1034 return -ENXIO;
1035 }
1036
1037 if (priv->stations[sta_id].tid[tid].agg.state ==
1038 IWL_EMPTYING_HW_QUEUE_ADDBA) {
1039 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
1040 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1041 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1042 return 0;
1043 }
1044
1045 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1046 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
1047
1048 tid_data = &priv->stations[sta_id].tid[tid];
1049 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
1050 txq_id = tid_data->agg.txq_id;
1051 write_ptr = priv->txq[txq_id].q.write_ptr;
1052 read_ptr = priv->txq[txq_id].q.read_ptr;
1053
1054 /* The queue is not empty */
1055 if (write_ptr != read_ptr) {
1056 IWL_DEBUG_HT(priv, "Stopping a non empty AGG HW QUEUE\n");
1057 priv->stations[sta_id].tid[tid].agg.state =
1058 IWL_EMPTYING_HW_QUEUE_DELBA;
1059 return 0;
1060 }
1061
1062 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1063 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1064
1065 spin_lock_irqsave(&priv->lock, flags);
1066 /*
1067 * The only reason this call can fail is a queue number that is out of
1068 * range, which can happen if the uCode was reloaded and all station
1069 * information was lost. If the queue is outside the range, there is
1070 * no need to deactivate the uCode queue; just return "success" to
1071 * allow mac80211 to clean up its own data.
1072 */
1073 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
1074 tx_fifo_id);
1075 spin_unlock_irqrestore(&priv->lock, flags);
1076
1077 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1078
1079 return 0;
1080}
1081
1082int iwlagn_txq_check_empty(struct iwl_priv *priv,
1083 int sta_id, u8 tid, int txq_id)
1084{
1085 struct iwl_queue *q = &priv->txq[txq_id].q;
1086 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1087 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1088
1089 switch (priv->stations[sta_id].tid[tid].agg.state) {
1090 case IWL_EMPTYING_HW_QUEUE_DELBA:
1091 /* We are reclaiming the last packet of the
1092 * aggregated HW queue */
1093 if ((txq_id == tid_data->agg.txq_id) &&
1094 (q->read_ptr == q->write_ptr)) {
1095 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1096 int tx_fifo = get_fifo_from_tid(tid);
1097 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
1098 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1099 ssn, tx_fifo);
1100 tid_data->agg.state = IWL_AGG_OFF;
1101 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
1102 }
1103 break;
1104 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1105 /* We are reclaiming the last packet of the queue */
1106 if (tid_data->tfds_in_queue == 0) {
1107 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
1108 tid_data->agg.state = IWL_AGG_ON;
1109 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
1110 }
1111 break;
1112 }
1113 return 0;
1114}
1115
1116static void iwlagn_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
1117{
1118 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1119 struct ieee80211_sta *sta;
1120 struct iwl_station_priv *sta_priv;
1121
1122 sta = ieee80211_find_sta(priv->vif, hdr->addr1);
1123 if (sta) {
1124 sta_priv = (void *)sta->drv_priv;
1125 /* avoid atomic ops if this isn't a client */
1126 if (sta_priv->client &&
1127 atomic_dec_return(&sta_priv->pending_frames) == 0)
1128 ieee80211_sta_block_awake(priv->hw, sta, false);
1129 }
1130
1131 ieee80211_tx_status_irqsafe(priv->hw, skb);
1132}
1133
1134int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1135{
1136 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1137 struct iwl_queue *q = &txq->q;
1138 struct iwl_tx_info *tx_info;
1139 int nfreed = 0;
1140 struct ieee80211_hdr *hdr;
1141
1142 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1143 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1144 "is out of range [0-%d] %d %d.\n", txq_id,
1145 index, q->n_bd, q->write_ptr, q->read_ptr);
1146 return 0;
1147 }
1148
1149 for (index = iwl_queue_inc_wrap(index, q->n_bd);
1150 q->read_ptr != index;
1151 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1152
1153 tx_info = &txq->txb[txq->q.read_ptr];
1154 iwlagn_tx_status(priv, tx_info->skb[0]);
1155
1156 hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
1157 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
1158 nfreed++;
1159 tx_info->skb[0] = NULL;
1160
1161 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1162 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1163
1164 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1165 }
1166 return nfreed;
1167}
1168
1169/**
1170 * iwlagn_tx_status_reply_compressed_ba - Update tx status from block-ack
1171 *
1172 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1173 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
1174 */
1175static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1176 struct iwl_ht_agg *agg,
1177 struct iwl_compressed_ba_resp *ba_resp)
1178
1179{
1180 int i, sh, ack;
1181 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1182 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1183 u64 bitmap;
1184 int successes = 0;
1185 struct ieee80211_tx_info *info;
1186
1187 if (unlikely(!agg->wait_for_ba)) {
1188 IWL_ERR(priv, "Received BA when not expected\n");
1189 return -EINVAL;
1190 }
1191
1192 /* Mark that the expected block-ack response arrived */
1193 agg->wait_for_ba = 0;
1194 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
1195
1196 /* Calculate shift to align block-ack bits with our Tx window bits */
1197 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
1198 if (sh < 0) /* something is wrong with the indices */
1199 sh += 0x100;
1200
1201 /* shift the bitmap so bit 0 lines up with the Tx window start */
1202 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1203
1204 if (agg->frame_count > (64 - sh)) {
1205 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1206 return -1;
1207 }
1208
1209 /* check for success or failure according to the
1210 * transmitted bitmap and block-ack bitmap */
1211 bitmap &= agg->bitmap;
1212
1213 /* For each frame attempted in aggregation,
1214 * update driver's record of tx frame's status. */
1215 for (i = 0; i < agg->frame_count ; i++) {
1216 ack = bitmap & (1ULL << i);
1217 successes += !!ack;
1218 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1219 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
1220 agg->start_idx + i);
1221 }
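	/*
	 * Worked example (editor's note, illustrative): if agg->start_idx
	 * is 5 and the BA's seq_ctl >> 4 maps to index 3, then sh == 2, so
	 *
	 *	bitmap = le64_to_cpu(ba_resp->bitmap) >> 2;
	 *
	 * lines bit 0 up with the first frame of the Tx window before the
	 * per-frame ACK/NACK loop above walks agg->frame_count bits.
	 */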
1222
1223 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
1224 memset(&info->status, 0, sizeof(info->status));
1225 info->flags |= IEEE80211_TX_STAT_ACK;
1226 info->flags |= IEEE80211_TX_STAT_AMPDU;
1227 info->status.ampdu_ack_map = successes;
1228 info->status.ampdu_ack_len = agg->frame_count;
1229 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1230
1231 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
1232
1233 return 0;
1234}
1235
1236/**
1237 * translate ucode response to mac80211 tx status control values
1238 */
1239void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
1240 struct ieee80211_tx_info *info)
1241{
1242 struct ieee80211_tx_rate *r = &info->control.rates[0];
1243
1244 info->antenna_sel_tx =
1245 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
1246 if (rate_n_flags & RATE_MCS_HT_MSK)
1247 r->flags |= IEEE80211_TX_RC_MCS;
1248 if (rate_n_flags & RATE_MCS_GF_MSK)
1249 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1250 if (rate_n_flags & RATE_MCS_HT40_MSK)
1251 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1252 if (rate_n_flags & RATE_MCS_DUP_MSK)
1253 r->flags |= IEEE80211_TX_RC_DUP_DATA;
1254 if (rate_n_flags & RATE_MCS_SGI_MSK)
1255 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1256 r->idx = iwlagn_hwrate_to_mac80211_idx(rate_n_flags, info->band);
1257}
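/*
 * Editor's sketch (illustrative, mcs_bits is a hypothetical placeholder):
 * for an HT rate with short guard interval on a 40 MHz channel, e.g.
 *
 *	rate_n_flags = RATE_MCS_HT_MSK | RATE_MCS_HT40_MSK |
 *		       RATE_MCS_SGI_MSK | mcs_bits;
 *
 * the translation above sets IEEE80211_TX_RC_MCS,
 * IEEE80211_TX_RC_40_MHZ_WIDTH and IEEE80211_TX_RC_SHORT_GI on
 * info->control.rates[0], then recovers the mac80211 rate index via
 * iwlagn_hwrate_to_mac80211_idx().
 */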
1258
1259/**
1260 * iwlagn_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1261 *
1262 * Handles block-acknowledge notification from device, which reports success
1263 * of frames sent via aggregation.
1264 */
1265void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1266 struct iwl_rx_mem_buffer *rxb)
1267{
1268 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1269 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1270 struct iwl_tx_queue *txq = NULL;
1271 struct iwl_ht_agg *agg;
1272 int index;
1273 int sta_id;
1274 int tid;
1275
1276 /* "flow" corresponds to Tx queue */
1277 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1278
1279 /* "ssn" is start of block-ack Tx window, corresponds to index
1280 * (in Tx queue's circular buffer) of first TFD/frame in window */
1281 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1282
1283 if (scd_flow >= priv->hw_params.max_txq_num) {
1284 IWL_ERR(priv,
1285 "BUG_ON scd_flow is bigger than number of queues\n");
1286 return;
1287 }
1288
1289 txq = &priv->txq[scd_flow];
1290 sta_id = ba_resp->sta_id;
1291 tid = ba_resp->tid;
1292 agg = &priv->stations[sta_id].tid[tid].agg;
1293
1294 /* Find index just before block-ack window */
1295 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1296
1297 /* TODO: Need to get this copy more safely - now good for debug */
1298
1299 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1300 "sta_id = %d\n",
1301 agg->wait_for_ba,
1302 (u8 *) &ba_resp->sta_addr_lo32,
1303 ba_resp->sta_id);
1304 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
1305 "%d, scd_ssn = %d\n",
1306 ba_resp->tid,
1307 ba_resp->seq_ctl,
1308 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1309 ba_resp->scd_flow,
1310 ba_resp->scd_ssn);
1311 IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
1312 agg->start_idx,
1313 (unsigned long long)agg->bitmap);
1314
1315 /* Update driver's record of ACK vs. not for each frame in window */
1316 iwlagn_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1317
1318 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1319 * block-ack window (we assume that they've been successfully
1320 * transmitted ... if not, it's too late anyway). */
1321 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1322 /* calculate mac80211 ampdu sw queue to wake */
1323 int freed = iwlagn_tx_queue_reclaim(priv, scd_flow, index);
1324 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1325
1326 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1327 priv->mac80211_registered &&
1328 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1329 iwl_wake_queue(priv, txq->swq_id);
1330
1331 iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
1332 }
1333}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
new file mode 100644
index 000000000000..52ae157968b2
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -0,0 +1,416 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *
28 *****************************************************************************/
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/sched.h>
34
35#include "iwl-dev.h"
36#include "iwl-core.h"
37#include "iwl-io.h"
38#include "iwl-helpers.h"
39#include "iwl-agn-hw.h"
40#include "iwl-agn.h"
41
42static const s8 iwlagn_default_queue_to_tx_fifo[] = {
43 IWL_TX_FIFO_VO,
44 IWL_TX_FIFO_VI,
45 IWL_TX_FIFO_BE,
46 IWL_TX_FIFO_BK,
47 IWLAGN_CMD_FIFO_NUM,
48 IWL_TX_FIFO_UNUSED,
49 IWL_TX_FIFO_UNUSED,
50 IWL_TX_FIFO_UNUSED,
51 IWL_TX_FIFO_UNUSED,
52 IWL_TX_FIFO_UNUSED,
53};
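/*
 * Editor's note (illustrative): the table above gives the boot-time
 * queue -> FIFO mapping, e.g.
 *
 *	txq 0 -> IWL_TX_FIFO_VO		(voice)
 *	txq 3 -> IWL_TX_FIFO_BK		(background)
 *	txq 4 -> IWLAGN_CMD_FIFO_NUM	(host commands)
 *
 * iwlagn_alive_notify() below walks this array and skips the
 * IWL_TX_FIFO_UNUSED entries when programming the scheduler.
 */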
54
55/*
56 * ucode
57 */
58static int iwlagn_load_section(struct iwl_priv *priv, const char *name,
59 struct fw_desc *image, u32 dst_addr)
60{
61 dma_addr_t phy_addr = image->p_addr;
62 u32 byte_cnt = image->len;
63 int ret;
64
65 priv->ucode_write_complete = 0;
66
67 iwl_write_direct32(priv,
68 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
69 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
70
71 iwl_write_direct32(priv,
72 FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL), dst_addr);
73
74 iwl_write_direct32(priv,
75 FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
76 phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);
77
78 iwl_write_direct32(priv,
79 FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
80 (iwl_get_dma_hi_addr(phy_addr)
81 << FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);
82
83 iwl_write_direct32(priv,
84 FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
85 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
86 1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
87 FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);
88
89 iwl_write_direct32(priv,
90 FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
91 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
92 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
93 FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
94
95 IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
96 ret = wait_event_interruptible_timeout(priv->wait_command_queue,
97 priv->ucode_write_complete, 5 * HZ);
98 if (ret == -ERESTARTSYS) {
99 IWL_ERR(priv, "Could not load the %s uCode section due "
100 "to interrupt\n", name);
101 return ret;
102 }
103 if (!ret) {
104 IWL_ERR(priv, "Could not load the %s uCode section\n",
105 name);
106 return -ETIMEDOUT;
107 }
108
109 return 0;
110}
111
112static int iwlagn_load_given_ucode(struct iwl_priv *priv,
113 struct fw_desc *inst_image,
114 struct fw_desc *data_image)
115{
116 int ret = 0;
117
118 ret = iwlagn_load_section(priv, "INST", inst_image,
119 IWLAGN_RTC_INST_LOWER_BOUND);
120 if (ret)
121 return ret;
122
123 return iwlagn_load_section(priv, "DATA", data_image,
124 IWLAGN_RTC_DATA_LOWER_BOUND);
125}
126
127int iwlagn_load_ucode(struct iwl_priv *priv)
128{
129 int ret = 0;
130
131 /* check whether init ucode should be loaded, or rather runtime ucode */
132 if (priv->ucode_init.len && (priv->ucode_type == UCODE_NONE)) {
133 IWL_DEBUG_INFO(priv, "Init ucode found. Loading init ucode...\n");
134 ret = iwlagn_load_given_ucode(priv,
135 &priv->ucode_init, &priv->ucode_init_data);
136 if (!ret) {
137 IWL_DEBUG_INFO(priv, "Init ucode load complete.\n");
138 priv->ucode_type = UCODE_INIT;
139 }
140 } else {
141 IWL_DEBUG_INFO(priv, "Init ucode not found, or already loaded. "
142 "Loading runtime ucode...\n");
143 ret = iwlagn_load_given_ucode(priv,
144 &priv->ucode_code, &priv->ucode_data);
145 if (!ret) {
146 IWL_DEBUG_INFO(priv, "Runtime ucode load complete.\n");
147 priv->ucode_type = UCODE_RT;
148 }
149 }
150
151 return ret;
152}
153
154#define IWL_UCODE_GET(item) \
155static u32 iwlagn_ucode_get_##item(const struct iwl_ucode_header *ucode,\
156 u32 api_ver) \
157{ \
158 if (api_ver <= 2) \
159 return le32_to_cpu(ucode->u.v1.item); \
160 return le32_to_cpu(ucode->u.v2.item); \
161}
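/*
 * Illustrative expansion (editor's note): IWL_UCODE_GET(inst_size) below
 * generates a versioned accessor equivalent to:
 *
 *	static u32 iwlagn_ucode_get_inst_size(
 *			const struct iwl_ucode_header *ucode, u32 api_ver)
 *	{
 *		if (api_ver <= 2)
 *			return le32_to_cpu(ucode->u.v1.inst_size);
 *		return le32_to_cpu(ucode->u.v2.inst_size);
 *	}
 */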
162
163static u32 iwlagn_ucode_get_header_size(u32 api_ver)
164{
165 if (api_ver <= 2)
166 return UCODE_HEADER_SIZE(1);
167 return UCODE_HEADER_SIZE(2);
168}
169
170static u32 iwlagn_ucode_get_build(const struct iwl_ucode_header *ucode,
171 u32 api_ver)
172{
173 if (api_ver <= 2)
174 return 0;
175 return le32_to_cpu(ucode->u.v2.build);
176}
177
178static u8 *iwlagn_ucode_get_data(const struct iwl_ucode_header *ucode,
179 u32 api_ver)
180{
181 if (api_ver <= 2)
182 return (u8 *) ucode->u.v1.data;
183 return (u8 *) ucode->u.v2.data;
184}
185
186IWL_UCODE_GET(inst_size);
187IWL_UCODE_GET(data_size);
188IWL_UCODE_GET(init_size);
189IWL_UCODE_GET(init_data_size);
190IWL_UCODE_GET(boot_size);
191
192struct iwl_ucode_ops iwlagn_ucode = {
193 .get_header_size = iwlagn_ucode_get_header_size,
194 .get_build = iwlagn_ucode_get_build,
195 .get_inst_size = iwlagn_ucode_get_inst_size,
196 .get_data_size = iwlagn_ucode_get_data_size,
197 .get_init_size = iwlagn_ucode_get_init_size,
198 .get_init_data_size = iwlagn_ucode_get_init_data_size,
199 .get_boot_size = iwlagn_ucode_get_boot_size,
200 .get_data = iwlagn_ucode_get_data,
201};
202
203/*
204 * Calibration
205 */
206static int iwlagn_set_Xtal_calib(struct iwl_priv *priv)
207{
208 struct iwl_calib_xtal_freq_cmd cmd;
209 __le16 *xtal_calib =
210 (__le16 *)iwl_eeprom_query_addr(priv, EEPROM_5000_XTAL);
211
212 cmd.hdr.op_code = IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD;
213 cmd.hdr.first_group = 0;
214 cmd.hdr.groups_num = 1;
215 cmd.hdr.data_valid = 1;
216 cmd.cap_pin1 = le16_to_cpu(xtal_calib[0]);
217 cmd.cap_pin2 = le16_to_cpu(xtal_calib[1]);
218 return iwl_calib_set(&priv->calib_results[IWL_CALIB_XTAL],
219 (u8 *)&cmd, sizeof(cmd));
220}
221
222static int iwlagn_send_calib_cfg(struct iwl_priv *priv)
223{
224 struct iwl_calib_cfg_cmd calib_cfg_cmd;
225 struct iwl_host_cmd cmd = {
226 .id = CALIBRATION_CFG_CMD,
227 .len = sizeof(struct iwl_calib_cfg_cmd),
228 .data = &calib_cfg_cmd,
229 };
230
231 memset(&calib_cfg_cmd, 0, sizeof(calib_cfg_cmd));
232 calib_cfg_cmd.ucd_calib_cfg.once.is_enable = IWL_CALIB_INIT_CFG_ALL;
233 calib_cfg_cmd.ucd_calib_cfg.once.start = IWL_CALIB_INIT_CFG_ALL;
234 calib_cfg_cmd.ucd_calib_cfg.once.send_res = IWL_CALIB_INIT_CFG_ALL;
235 calib_cfg_cmd.ucd_calib_cfg.flags = IWL_CALIB_INIT_CFG_ALL;
236
237 return iwl_send_cmd(priv, &cmd);
238}
239
240void iwlagn_rx_calib_result(struct iwl_priv *priv,
241 struct iwl_rx_mem_buffer *rxb)
242{
243 struct iwl_rx_packet *pkt = rxb_addr(rxb);
244 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
245 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
246 int index;
247
248 /* reduce the size of the length field itself */
249 len -= 4;
250
251 /* Define the order in which the results will be sent to the runtime
252 * uCode. iwl_send_calib_results sends them in order of their index,
253 * so we sort them here.
254 */
255 switch (hdr->op_code) {
256 case IWL_PHY_CALIBRATE_DC_CMD:
257 index = IWL_CALIB_DC;
258 break;
259 case IWL_PHY_CALIBRATE_LO_CMD:
260 index = IWL_CALIB_LO;
261 break;
262 case IWL_PHY_CALIBRATE_TX_IQ_CMD:
263 index = IWL_CALIB_TX_IQ;
264 break;
265 case IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD:
266 index = IWL_CALIB_TX_IQ_PERD;
267 break;
268 case IWL_PHY_CALIBRATE_BASE_BAND_CMD:
269 index = IWL_CALIB_BASE_BAND;
270 break;
271 default:
272 IWL_ERR(priv, "Unknown calibration notification %d\n",
273 hdr->op_code);
274 return;
275 }
276 iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
277}
278
279void iwlagn_rx_calib_complete(struct iwl_priv *priv,
280 struct iwl_rx_mem_buffer *rxb)
281{
282 IWL_DEBUG_INFO(priv, "Init. calibration is completed, restarting fw.\n");
283 queue_work(priv->workqueue, &priv->restart);
284}
285
286void iwlagn_init_alive_start(struct iwl_priv *priv)
287{
288 int ret = 0;
289
290 /* Check alive response for "valid" sign from uCode */
291 if (priv->card_alive_init.is_valid != UCODE_VALID_OK) {
292 /* We had an error bringing up the hardware, so take it
293 * all the way back down so we can try again */
294 IWL_DEBUG_INFO(priv, "Initialize Alive failed.\n");
295 goto restart;
296 }
297
298 /* initialize uCode was loaded... verify inst image.
299 * This is a paranoid check, because we would not have gotten the
300 * "initialize" alive if code weren't properly loaded. */
301 if (iwl_verify_ucode(priv)) {
302 /* Runtime instruction load was bad;
303 * take it all the way back down so we can try again */
304 IWL_DEBUG_INFO(priv, "Bad \"initialize\" uCode load.\n");
305 goto restart;
306 }
307
308 ret = priv->cfg->ops->lib->alive_notify(priv);
309 if (ret) {
310 IWL_WARN(priv,
311 "Could not complete ALIVE transition: %d\n", ret);
312 goto restart;
313 }
314
315 iwlagn_send_calib_cfg(priv);
316 return;
317
318restart:
319 /* real restart (first load init_ucode) */
320 queue_work(priv->workqueue, &priv->restart);
321}
322
323int iwlagn_alive_notify(struct iwl_priv *priv)
324{
325 u32 a;
326 unsigned long flags;
327 int i, chan;
328 u32 reg_val;
329
330 spin_lock_irqsave(&priv->lock, flags);
331
332 priv->scd_base_addr = iwl_read_prph(priv, IWL50_SCD_SRAM_BASE_ADDR);
333 a = priv->scd_base_addr + IWL50_SCD_CONTEXT_DATA_OFFSET;
334 for (; a < priv->scd_base_addr + IWL50_SCD_TX_STTS_BITMAP_OFFSET;
335 a += 4)
336 iwl_write_targ_mem(priv, a, 0);
337 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
338 a += 4)
339 iwl_write_targ_mem(priv, a, 0);
340 for (; a < priv->scd_base_addr +
341 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
342 iwl_write_targ_mem(priv, a, 0);
343
344 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
345 priv->scd_bc_tbls.dma >> 10);
346
347 /* Enable DMA channel */
348 for (chan = 0; chan < FH50_TCSR_CHNL_NUM ; chan++)
349 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
350 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
351 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
352
353 /* Update FH chicken bits */
354 reg_val = iwl_read_direct32(priv, FH_TX_CHICKEN_BITS_REG);
355 iwl_write_direct32(priv, FH_TX_CHICKEN_BITS_REG,
356 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
357
358 iwl_write_prph(priv, IWL50_SCD_QUEUECHAIN_SEL,
359 IWL50_SCD_QUEUECHAIN_SEL_ALL(priv->hw_params.max_txq_num));
360 iwl_write_prph(priv, IWL50_SCD_AGGR_SEL, 0);
361
362 /* initiate the queues */
363 for (i = 0; i < priv->hw_params.max_txq_num; i++) {
364 iwl_write_prph(priv, IWL50_SCD_QUEUE_RDPTR(i), 0);
365 iwl_write_direct32(priv, HBUS_TARG_WRPTR, 0 | (i << 8));
366 iwl_write_targ_mem(priv, priv->scd_base_addr +
367 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i), 0);
368 iwl_write_targ_mem(priv, priv->scd_base_addr +
369 IWL50_SCD_CONTEXT_QUEUE_OFFSET(i) +
370 sizeof(u32),
371 ((SCD_WIN_SIZE <<
372 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
373 IWL50_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
374 ((SCD_FRAME_LIMIT <<
375 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
376 IWL50_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
377 }
378
379 iwl_write_prph(priv, IWL50_SCD_INTERRUPT_MASK,
380 IWL_MASK(0, priv->hw_params.max_txq_num));
381
382 /* Activate all Tx DMA/FIFO channels */
383 priv->cfg->ops->lib->txq_set_sched(priv, IWL_MASK(0, 7));
384
385 iwlagn_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
386
387 /* make sure none of the queues are stopped */
388 memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
389 for (i = 0; i < 4; i++)
390 atomic_set(&priv->queue_stop_count[i], 0);
391
392 /* reset to 0 to enable all queues first */
393 priv->txq_ctx_active_msk = 0;
394 /* map qos queues to fifos one-to-one */
395 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo) != 10);
396
397 for (i = 0; i < ARRAY_SIZE(iwlagn_default_queue_to_tx_fifo); i++) {
398 int ac = iwlagn_default_queue_to_tx_fifo[i];
399
400 iwl_txq_ctx_activate(priv, i);
401
402 if (ac == IWL_TX_FIFO_UNUSED)
403 continue;
404
405 iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
406 }
407
408 spin_unlock_irqrestore(&priv->lock, flags);
409
410 iwl_send_wimax_coex(priv);
411
412 iwlagn_set_Xtal_calib(priv);
413 iwl_send_calib_results(priv);
414
415 return 0;
416}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index bdff56583e11..310bc6aeb99b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -55,6 +55,7 @@
55#include "iwl-helpers.h" 55#include "iwl-helpers.h"
56#include "iwl-sta.h" 56#include "iwl-sta.h"
57#include "iwl-calib.h" 57#include "iwl-calib.h"
58#include "iwl-agn.h"
58 59
59 60
60/****************************************************************************** 61/******************************************************************************
@@ -83,13 +84,6 @@ MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
83MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
84MODULE_ALIAS("iwl4965"); 85MODULE_ALIAS("iwl4965");
85 86
86/*************** STATION TABLE MANAGEMENT ****
87 * mac80211 should be examined to determine if sta_info is duplicating
88 * the functionality provided here
89 */
90
91/**************************************************************/
92
93/** 87/**
94 * iwl_commit_rxon - commit staging_rxon to hardware 88 * iwl_commit_rxon - commit staging_rxon to hardware
95 * 89 *
@@ -144,9 +138,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
144 return 0; 138 return 0;
145 } 139 }
146 140
147 /* station table will be cleared */
148 priv->assoc_station_added = 0;
149
150 /* If we are currently associated and the new config requires 141 /* If we are currently associated and the new config requires
151 * an RXON_ASSOC and the new config wants the associated mask enabled, 142 * an RXON_ASSOC and the new config wants the associated mask enabled,
152 * we must clear the associated from the active configuration 143 * we must clear the associated from the active configuration
@@ -166,6 +157,13 @@ int iwl_commit_rxon(struct iwl_priv *priv)
166 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret); 157 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
167 return ret; 158 return ret;
168 } 159 }
160 iwl_clear_ucode_stations(priv, false);
161 iwl_restore_stations(priv);
162 ret = iwl_restore_default_wep_keys(priv);
163 if (ret) {
164 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
165 return ret;
166 }
169 } 167 }
170 168
171 IWL_DEBUG_INFO(priv, "Sending RXON\n" 169 IWL_DEBUG_INFO(priv, "Sending RXON\n"
@@ -179,9 +177,8 @@ int iwl_commit_rxon(struct iwl_priv *priv)
179 iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto); 177 iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);
180 178
181 /* Apply the new configuration 179 /* Apply the new configuration
182 * RXON unassoc clears the station table in uCode, send it before 180 * RXON unassoc clears the station table in uCode so restoration of
183 * we add the bcast station. If assoc bit is set, we will send RXON 181 * stations is needed after it (the RXON command) completes
184 * after having added the bcast and bssid station.
185 */ 182 */
186 if (!new_assoc) { 183 if (!new_assoc) {
187 ret = iwl_send_cmd_pdu(priv, REPLY_RXON, 184 ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
@@ -190,35 +187,19 @@ int iwl_commit_rxon(struct iwl_priv *priv)
190 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret); 187 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
191 return ret; 188 return ret;
192 } 189 }
190 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
193 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon)); 191 memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
192 iwl_clear_ucode_stations(priv, false);
193 iwl_restore_stations(priv);
194 ret = iwl_restore_default_wep_keys(priv);
195 if (ret) {
196 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
197 return ret;
198 }
194 } 199 }
195 200
196 iwl_clear_stations_table(priv);
197
198 priv->start_calib = 0; 201 priv->start_calib = 0;
199
200 /* Add the broadcast address so we can send broadcast frames */
201 priv->cfg->ops->lib->add_bcast_station(priv);
202
203
204 /* If we have set the ASSOC_MSK and we are in BSS mode then
205 * add the IWL_AP_ID to the station rate table */
206 if (new_assoc) { 202 if (new_assoc) {
207 if (priv->iw_mode == NL80211_IFTYPE_STATION) {
208 ret = iwl_rxon_add_station(priv,
209 priv->active_rxon.bssid_addr, 1);
210 if (ret == IWL_INVALID_STATION) {
211 IWL_ERR(priv,
212 "Error adding AP address for TX.\n");
213 return -EIO;
214 }
215 priv->assoc_station_added = 1;
216 if (priv->default_wep_key &&
217 iwl_send_static_wepkey_cmd(priv, 0))
218 IWL_ERR(priv,
219 "Could not send WEP static key.\n");
220 }
221
222 /* 203 /*
223 * allow CTS-to-self if possible for new association. 204 * allow CTS-to-self if possible for new association.
224 * this is relevant only for 5000 series and up, 205 * this is relevant only for 5000 series and up,
@@ -907,10 +888,10 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
907 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] = 888 priv->rx_handlers[MISSED_BEACONS_NOTIFICATION] =
908 iwl_rx_missed_beacon_notif; 889 iwl_rx_missed_beacon_notif;
909 /* Rx handlers */ 890 /* Rx handlers */
910 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwl_rx_reply_rx_phy; 891 priv->rx_handlers[REPLY_RX_PHY_CMD] = iwlagn_rx_reply_rx_phy;
911 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwl_rx_reply_rx; 892 priv->rx_handlers[REPLY_RX_MPDU_CMD] = iwlagn_rx_reply_rx;
912 /* block ack */ 893 /* block ack */
913 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwl_rx_reply_compressed_ba; 894 priv->rx_handlers[REPLY_COMPRESSED_BA] = iwlagn_rx_reply_compressed_ba;
914 /* Set up hardware specific Rx handlers */ 895 /* Set up hardware specific Rx handlers */
915 priv->cfg->ops->lib->rx_handler_setup(priv); 896 priv->cfg->ops->lib->rx_handler_setup(priv);
916} 897}
@@ -1038,7 +1019,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
1038 count++; 1019 count++;
1039 if (count >= 8) { 1020 if (count >= 8) {
1040 rxq->read = i; 1021 rxq->read = i;
1041 iwl_rx_replenish_now(priv); 1022 iwlagn_rx_replenish_now(priv);
1042 count = 0; 1023 count = 0;
1043 } 1024 }
1044 } 1025 }
@@ -1047,9 +1028,9 @@ void iwl_rx_handle(struct iwl_priv *priv)
1047 /* Backtrack one entry */ 1028 /* Backtrack one entry */
1048 rxq->read = i; 1029 rxq->read = i;
1049 if (fill_rx) 1030 if (fill_rx)
1050 iwl_rx_replenish_now(priv); 1031 iwlagn_rx_replenish_now(priv);
1051 else 1032 else
1052 iwl_rx_queue_restock(priv); 1033 iwlagn_rx_queue_restock(priv);
1053} 1034}
1054 1035
1055/* call this function to flush any scheduled tasklet */ 1036/* call this function to flush any scheduled tasklet */
@@ -1267,9 +1248,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1267 * hardware bugs here by ACKing all the possible interrupts so that 1248 * hardware bugs here by ACKing all the possible interrupts so that
1268 * interrupt coalescing can still be achieved. 1249 * interrupt coalescing can still be achieved.
1269 */ 1250 */
1270 iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask); 1251 iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
1271 1252
1272 inta = priv->inta; 1253 inta = priv->_agn.inta;
1273 1254
1274#ifdef CONFIG_IWLWIFI_DEBUG 1255#ifdef CONFIG_IWLWIFI_DEBUG
1275 if (iwl_get_debug_level(priv) & IWL_DL_ISR) { 1256 if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
@@ -1282,8 +1263,8 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1282 1263
1283 spin_unlock_irqrestore(&priv->lock, flags); 1264 spin_unlock_irqrestore(&priv->lock, flags);
1284 1265
1285 /* saved interrupt in inta variable now we can reset priv->inta */ 1266 /* saved interrupt in inta variable now we can reset priv->_agn.inta */
1286 priv->inta = 0; 1267 priv->_agn.inta = 0;
1287 1268
1288 /* Now service all interrupt bits discovered above. */ 1269 /* Now service all interrupt bits discovered above. */
1289 if (inta & CSR_INT_BIT_HW_ERR) { 1270 if (inta & CSR_INT_BIT_HW_ERR) {
@@ -1448,6 +1429,60 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1448 iwl_enable_interrupts(priv); 1429 iwl_enable_interrupts(priv);
1449} 1430}
1450 1431
1432/* the threshold ratio of actual_ack_cnt to expected_ack_cnt in percent */
1433#define ACK_CNT_RATIO (50)
1434#define BA_TIMEOUT_CNT (5)
1435#define BA_TIMEOUT_MAX (16)
1436
1437/**
1438 * iwl_good_ack_health - checks for ACK count ratios, BA timeout retries.
1439 *
1440 * When the ACK count ratio is 0 and aggregated BA timeout retries exceeding
1441 * the BA_TIMEOUT_MAX, reload firmware and bring system back to normal
1442 * operation state.
1443 */
1444bool iwl_good_ack_health(struct iwl_priv *priv,
1445 struct iwl_rx_packet *pkt)
1446{
1447 bool rc = true;
1448 int actual_ack_cnt_delta, expected_ack_cnt_delta;
1449 int ba_timeout_delta;
1450
1451 actual_ack_cnt_delta =
1452 le32_to_cpu(pkt->u.stats.tx.actual_ack_cnt) -
1453 le32_to_cpu(priv->statistics.tx.actual_ack_cnt);
1454 expected_ack_cnt_delta =
1455 le32_to_cpu(pkt->u.stats.tx.expected_ack_cnt) -
1456 le32_to_cpu(priv->statistics.tx.expected_ack_cnt);
1457 ba_timeout_delta =
1458 le32_to_cpu(pkt->u.stats.tx.agg.ba_timeout) -
1459 le32_to_cpu(priv->statistics.tx.agg.ba_timeout);
1460 if ((priv->_agn.agg_tids_count > 0) &&
1461 (expected_ack_cnt_delta > 0) &&
1462 (((actual_ack_cnt_delta * 100) / expected_ack_cnt_delta)
1463 < ACK_CNT_RATIO) &&
1464 (ba_timeout_delta > BA_TIMEOUT_CNT)) {
1465 IWL_DEBUG_RADIO(priv, "actual_ack_cnt delta = %d,"
1466 " expected_ack_cnt = %d\n",
1467 actual_ack_cnt_delta, expected_ack_cnt_delta);
1468
1469#ifdef CONFIG_IWLWIFI_DEBUG
1470 IWL_DEBUG_RADIO(priv, "rx_detected_cnt delta = %d\n",
1471 priv->delta_statistics.tx.rx_detected_cnt);
1472 IWL_DEBUG_RADIO(priv,
1473 "ack_or_ba_timeout_collision delta = %d\n",
1474 priv->delta_statistics.tx.
1475 ack_or_ba_timeout_collision);
1476#endif
1477 IWL_DEBUG_RADIO(priv, "agg ba_timeout delta = %d\n",
1478 ba_timeout_delta);
1479 if (!actual_ack_cnt_delta &&
1480 (ba_timeout_delta >= BA_TIMEOUT_MAX))
1481 rc = false;
1482 }
1483 return rc;
1484}
1485
1451 1486
1452/****************************************************************************** 1487/******************************************************************************
1453 * 1488 *
@@ -1809,6 +1844,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1809 u32 data2, line; 1844 u32 data2, line;
1810 u32 desc, time, count, base, data1; 1845 u32 desc, time, count, base, data1;
1811 u32 blink1, blink2, ilink1, ilink2; 1846 u32 blink1, blink2, ilink1, ilink2;
1847 u32 pc, hcmd;
1812 1848
1813 if (priv->ucode_type == UCODE_INIT) 1849 if (priv->ucode_type == UCODE_INIT)
1814 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr); 1850 base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
@@ -1831,6 +1867,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1831 } 1867 }
1832 1868
1833 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32)); 1869 desc = iwl_read_targ_mem(priv, base + 1 * sizeof(u32));
1870 pc = iwl_read_targ_mem(priv, base + 2 * sizeof(u32));
1834 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32)); 1871 blink1 = iwl_read_targ_mem(priv, base + 3 * sizeof(u32));
1835 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32)); 1872 blink2 = iwl_read_targ_mem(priv, base + 4 * sizeof(u32));
1836 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32)); 1873 ilink1 = iwl_read_targ_mem(priv, base + 5 * sizeof(u32));
@@ -1839,6 +1876,7 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1839 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32)); 1876 data2 = iwl_read_targ_mem(priv, base + 8 * sizeof(u32));
1840 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); 1877 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1841 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); 1878 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1879 hcmd = iwl_read_targ_mem(priv, base + 22 * sizeof(u32));
1842 1880
1843 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line, 1881 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
1844 blink1, blink2, ilink1, ilink2); 1882 blink1, blink2, ilink1, ilink2);
@@ -1847,10 +1885,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1847 "data1 data2 line\n"); 1885 "data1 data2 line\n");
1848 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", 1886 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
1849 desc_lookup(desc), desc, time, data1, data2, line); 1887 desc_lookup(desc), desc, time, data1, data2, line);
1850 IWL_ERR(priv, "blink1 blink2 ilink1 ilink2\n"); 1888 IWL_ERR(priv, "pc blink1 blink2 ilink1 ilink2 hcmd\n");
1851 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X\n", blink1, blink2, 1889 IWL_ERR(priv, "0x%05X 0x%05X 0x%05X 0x%05X 0x%05X 0x%05X\n",
1852 ilink1, ilink2); 1890 pc, blink1, blink2, ilink1, ilink2, hcmd);
1853
1854} 1891}
1855 1892
1856#define EVENT_START_OFFSET (4 * sizeof(u32)) 1893#define EVENT_START_OFFSET (4 * sizeof(u32))
@@ -1966,9 +2003,6 @@ static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1966 return pos; 2003 return pos;
1967} 2004}
1968 2005
1969/* For sanity check only. Actual size is determined by uCode, typ. 512 */
1970#define MAX_EVENT_LOG_SIZE (512)
1971
1972#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20) 2006#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
1973 2007
1974int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log, 2008int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@@ -2001,16 +2035,16 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2001 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 2035 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
2002 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 2036 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
2003 2037
2004 if (capacity > MAX_EVENT_LOG_SIZE) { 2038 if (capacity > priv->cfg->max_event_log_size) {
2005 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", 2039 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
2006 capacity, MAX_EVENT_LOG_SIZE); 2040 capacity, priv->cfg->max_event_log_size);
2007 capacity = MAX_EVENT_LOG_SIZE; 2041 capacity = priv->cfg->max_event_log_size;
2008 } 2042 }
2009 2043
2010 if (next_entry > MAX_EVENT_LOG_SIZE) { 2044 if (next_entry > priv->cfg->max_event_log_size) {
2011 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", 2045 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
2012 next_entry, MAX_EVENT_LOG_SIZE); 2046 next_entry, priv->cfg->max_event_log_size);
2013 next_entry = MAX_EVENT_LOG_SIZE; 2047 next_entry = priv->cfg->max_event_log_size;
2014 } 2048 }
2015 2049
2016 size = num_wraps ? capacity : next_entry; 2050 size = num_wraps ? capacity : next_entry;
@@ -2095,7 +2129,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2095 goto restart; 2129 goto restart;
2096 } 2130 }
2097 2131
2098 iwl_clear_stations_table(priv);
2099 ret = priv->cfg->ops->lib->alive_notify(priv); 2132 ret = priv->cfg->ops->lib->alive_notify(priv);
2100 if (ret) { 2133 if (ret) {
2101 IWL_WARN(priv, 2134 IWL_WARN(priv,
@@ -2106,13 +2139,19 @@ static void iwl_alive_start(struct iwl_priv *priv)
2106 /* After the ALIVE response, we can send host commands to the uCode */ 2139 /* After the ALIVE response, we can send host commands to the uCode */
2107 set_bit(STATUS_ALIVE, &priv->status); 2140 set_bit(STATUS_ALIVE, &priv->status);
2108 2141
2142 if (priv->cfg->ops->lib->recover_from_tx_stall) {
2143 /* Enable timer to monitor the driver queues */
2144 mod_timer(&priv->monitor_recover,
2145 jiffies +
2146 msecs_to_jiffies(priv->cfg->monitor_recover_period));
2147 }
2148
2109 if (iwl_is_rfkill(priv)) 2149 if (iwl_is_rfkill(priv))
2110 return; 2150 return;
2111 2151
2112 ieee80211_wake_queues(priv->hw); 2152 ieee80211_wake_queues(priv->hw);
2113 2153
2114 priv->active_rate = priv->rates_mask; 2154 priv->active_rate = IWL_RATES_MASK;
2115 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
2116 2155
2117 /* Configure Tx antenna selection based on H/W config */ 2156 /* Configure Tx antenna selection based on H/W config */
2118 if (priv->cfg->ops->hcmd->set_tx_ant) 2157 if (priv->cfg->ops->hcmd->set_tx_ant)
@@ -2152,19 +2191,9 @@ static void iwl_alive_start(struct iwl_priv *priv)
2152 wake_up_interruptible(&priv->wait_command_queue); 2191 wake_up_interruptible(&priv->wait_command_queue);
2153 2192
2154 iwl_power_update_mode(priv, true); 2193 iwl_power_update_mode(priv, true);
2155 2194 IWL_DEBUG_INFO(priv, "Updated power mode\n");
2156 /* reassociate for ADHOC mode */
2157 if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
2158 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
2159 priv->vif);
2160 if (beacon)
2161 iwl_mac_beacon_update(priv->hw, beacon);
2162 }
2163 2195
2164 2196
2165 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
2166 iwl_set_mode(priv, priv->iw_mode);
2167
2168 return; 2197 return;
2169 2198
2170 restart: 2199 restart:
@@ -2183,7 +2212,7 @@ static void __iwl_down(struct iwl_priv *priv)
2183 if (!exit_pending) 2212 if (!exit_pending)
2184 set_bit(STATUS_EXIT_PENDING, &priv->status); 2213 set_bit(STATUS_EXIT_PENDING, &priv->status);
2185 2214
2186 iwl_clear_stations_table(priv); 2215 iwl_clear_ucode_stations(priv, true);
2187 2216
2188 /* Unblock any waiting calls */ 2217 /* Unblock any waiting calls */
2189 wake_up_interruptible_all(&priv->wait_command_queue); 2218 wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2231,8 +2260,8 @@ static void __iwl_down(struct iwl_priv *priv)
2231 /* device going down, Stop using ICT table */ 2260 /* device going down, Stop using ICT table */
2232 iwl_disable_ict(priv); 2261 iwl_disable_ict(priv);
2233 2262
2234 iwl_txq_ctx_stop(priv); 2263 iwlagn_txq_ctx_stop(priv);
2235 iwl_rxq_stop(priv); 2264 iwlagn_rxq_stop(priv);
2236 2265
2237 /* Power-down device's busmaster DMA clocks */ 2266 /* Power-down device's busmaster DMA clocks */
2238 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT); 2267 iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
@@ -2292,7 +2321,7 @@ static int iwl_prepare_card_hw(struct iwl_priv *priv)
2292{ 2321{
2293 int ret = 0; 2322 int ret = 0;
2294 2323
2295 IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter \n"); 2324 IWL_DEBUG_INFO(priv, "iwl_prepare_card_hw enter\n");
2296 2325
2297 ret = iwl_set_hw_ready(priv); 2326 ret = iwl_set_hw_ready(priv);
2298 if (priv->hw_ready) 2327 if (priv->hw_ready)
@@ -2353,7 +2382,7 @@ static int __iwl_up(struct iwl_priv *priv)
2353 2382
2354 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 2383 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
2355 2384
2356 ret = iwl_hw_nic_init(priv); 2385 ret = iwlagn_hw_nic_init(priv);
2357 if (ret) { 2386 if (ret) {
2358 IWL_ERR(priv, "Unable to init nic\n"); 2387 IWL_ERR(priv, "Unable to init nic\n");
2359 return ret; 2388 return ret;
@@ -2380,8 +2409,6 @@ static int __iwl_up(struct iwl_priv *priv)
2380 2409
2381 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2410 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2382 2411
2383 iwl_clear_stations_table(priv);
2384
2385 /* load bootstrap state machine, 2412 /* load bootstrap state machine,
2386 * load bootstrap program into processor's memory, 2413 * load bootstrap program into processor's memory,
2387 * prepare to load the "initialize" uCode */ 2414 * prepare to load the "initialize" uCode */
@@ -2505,7 +2532,7 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
2505 return; 2532 return;
2506 2533
2507 mutex_lock(&priv->mutex); 2534 mutex_lock(&priv->mutex);
2508 iwl_rx_replenish(priv); 2535 iwlagn_rx_replenish(priv);
2509 mutex_unlock(&priv->mutex); 2536 mutex_unlock(&priv->mutex);
2510} 2537}
2511 2538
@@ -2515,17 +2542,12 @@ void iwl_post_associate(struct iwl_priv *priv)
2515{ 2542{
2516 struct ieee80211_conf *conf = NULL; 2543 struct ieee80211_conf *conf = NULL;
2517 int ret = 0; 2544 int ret = 0;
2518 unsigned long flags;
2519 2545
2520 if (priv->iw_mode == NL80211_IFTYPE_AP) { 2546 if (priv->iw_mode == NL80211_IFTYPE_AP) {
2521 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); 2547 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
2522 return; 2548 return;
2523 } 2549 }
2524 2550
2525 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2526 priv->assoc_id, priv->active_rxon.bssid_addr);
2527
2528
2529 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2551 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2530 return; 2552 return;
2531 2553
@@ -2577,6 +2599,9 @@ void iwl_post_associate(struct iwl_priv *priv)
2577 2599
2578 iwlcore_commit_rxon(priv); 2600 iwlcore_commit_rxon(priv);
2579 2601
2602 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2603 priv->assoc_id, priv->active_rxon.bssid_addr);
2604
2580 switch (priv->iw_mode) { 2605 switch (priv->iw_mode) {
2581 case NL80211_IFTYPE_STATION: 2606 case NL80211_IFTYPE_STATION:
2582 break; 2607 break;
@@ -2586,7 +2611,7 @@ void iwl_post_associate(struct iwl_priv *priv)
2586 /* assume default assoc id */ 2611 /* assume default assoc id */
2587 priv->assoc_id = 1; 2612 priv->assoc_id = 1;
2588 2613
2589 iwl_rxon_add_station(priv, priv->bssid, 0); 2614 iwl_add_local_station(priv, priv->bssid, true);
2590 iwl_send_beacon_cmd(priv); 2615 iwl_send_beacon_cmd(priv);
2591 2616
2592 break; 2617 break;
@@ -2597,13 +2622,6 @@ void iwl_post_associate(struct iwl_priv *priv)
2597 break; 2622 break;
2598 } 2623 }
2599 2624
2600 if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
2601 priv->assoc_station_added = 1;
2602
2603 spin_lock_irqsave(&priv->lock, flags);
2604 iwl_activate_qos(priv, 0);
2605 spin_unlock_irqrestore(&priv->lock, flags);
2606
2607 /* the chain noise calibration will enabled PM upon completion 2625 /* the chain noise calibration will enabled PM upon completion
2608 * If chain noise has already been run, then we need to enable 2626 * If chain noise has already been run, then we need to enable
2609 * power management here */ 2627 * power management here */
@@ -2770,7 +2788,7 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2770 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len, 2788 IWL_DEBUG_TX(priv, "dev->xmit(%d bytes) at rate 0x%02x\n", skb->len,
2771 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate); 2789 ieee80211_get_tx_rate(hw, IEEE80211_SKB_CB(skb))->bitrate);
2772 2790
2773 if (iwl_tx_skb(priv, skb)) 2791 if (iwlagn_tx_skb(priv, skb))
2774 dev_kfree_skb_any(skb); 2792 dev_kfree_skb_any(skb);
2775 2793
2776 IWL_DEBUG_MACDUMP(priv, "leave\n"); 2794 IWL_DEBUG_MACDUMP(priv, "leave\n");
@@ -2780,7 +2798,6 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2780void iwl_config_ap(struct iwl_priv *priv) 2798void iwl_config_ap(struct iwl_priv *priv)
2781{ 2799{
2782 int ret = 0; 2800 int ret = 0;
2783 unsigned long flags;
2784 2801
2785 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2802 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2786 return; 2803 return;
@@ -2832,10 +2849,6 @@ void iwl_config_ap(struct iwl_priv *priv)
2832 /* restore RXON assoc */ 2849 /* restore RXON assoc */
2833 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 2850 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
2834 iwlcore_commit_rxon(priv); 2851 iwlcore_commit_rxon(priv);
2835 iwl_reset_qos(priv);
2836 spin_lock_irqsave(&priv->lock, flags);
2837 iwl_activate_qos(priv, 1);
2838 spin_unlock_irqrestore(&priv->lock, flags);
2839 iwl_add_bcast_station(priv); 2852 iwl_add_bcast_station(priv);
2840 } 2853 }
2841 iwl_send_beacon_cmd(priv); 2854 iwl_send_beacon_cmd(priv);
@@ -2890,14 +2903,14 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2890 2903
2891 mutex_lock(&priv->mutex); 2904 mutex_lock(&priv->mutex);
2892 iwl_scan_cancel_timeout(priv, 100); 2905 iwl_scan_cancel_timeout(priv, 100);
2893 mutex_unlock(&priv->mutex);
2894 2906
2895 /* If we are getting WEP group key and we didn't receive any key mapping 2907 /*
2908 * If we are getting WEP group key and we didn't receive any key mapping
2896 * so far, we are in legacy wep mode (group key only), otherwise we are 2909 * so far, we are in legacy wep mode (group key only), otherwise we are
2897 * in 1X mode. 2910 * in 1X mode.
2898 * In legacy wep mode, we use another host command to the uCode */ 2911 * In legacy wep mode, we use another host command to the uCode.
2899 if (key->alg == ALG_WEP && sta_id == priv->hw_params.bcast_sta_id && 2912 */
2900 priv->iw_mode != NL80211_IFTYPE_AP) { 2913 if (key->alg == ALG_WEP && !sta && vif->type != NL80211_IFTYPE_AP) {
2901 if (cmd == SET_KEY) 2914 if (cmd == SET_KEY)
2902 is_default_wep_key = !priv->key_mapping_key; 2915 is_default_wep_key = !priv->key_mapping_key;
2903 else 2916 else
@@ -2926,6 +2939,7 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
2926 ret = -EINVAL; 2939 ret = -EINVAL;
2927 } 2940 }
2928 2941
2942 mutex_unlock(&priv->mutex);
2929 IWL_DEBUG_MAC80211(priv, "leave\n"); 2943 IWL_DEBUG_MAC80211(priv, "leave\n");
2930 2944
2931 return ret; 2945 return ret;
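
The hunk above widens the scope of priv->mutex: the unlock that used to follow the scan cancel now sits at the end of the handler, so the WEP-mode decision and the key install/remove all run under the same lock. A minimal sketch of that pattern, assuming a kernel context; the struct and helper logic here are hypothetical scaffolding, not iwlwifi code:

    #include <linux/mutex.h>

    /* Hypothetical container; assume st->lock was set up with mutex_init(). */
    struct key_state {
            struct mutex lock;
            int key_mapping_key;
    };

    static int set_key_sketch(struct key_state *st, int is_set_cmd)
    {
            int ret;

            mutex_lock(&st->lock);
            /* The scan cancel used to be the only step under the lock;
             * now the mode decision below is race-free against other
             * key operations as well. */
            ret = is_set_cmd ? !st->key_mapping_key : 0;
            /* ... key install/remove would happen here, still locked ... */
            mutex_unlock(&st->lock);        /* single unlock at the end */
            return ret;
    }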
@@ -2958,10 +2972,21 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
2958 return ret; 2972 return ret;
2959 case IEEE80211_AMPDU_TX_START: 2973 case IEEE80211_AMPDU_TX_START:
2960 IWL_DEBUG_HT(priv, "start Tx\n"); 2974 IWL_DEBUG_HT(priv, "start Tx\n");
2961 return iwl_tx_agg_start(priv, sta->addr, tid, ssn); 2975 ret = iwlagn_tx_agg_start(priv, sta->addr, tid, ssn);
2976 if (ret == 0) {
2977 priv->_agn.agg_tids_count++;
2978 IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
2979 priv->_agn.agg_tids_count);
2980 }
2981 return ret;
2962 case IEEE80211_AMPDU_TX_STOP: 2982 case IEEE80211_AMPDU_TX_STOP:
2963 IWL_DEBUG_HT(priv, "stop Tx\n"); 2983 IWL_DEBUG_HT(priv, "stop Tx\n");
2964 ret = iwl_tx_agg_stop(priv, sta->addr, tid); 2984 ret = iwlagn_tx_agg_stop(priv, sta->addr, tid);
2985 if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
2986 priv->_agn.agg_tids_count--;
2987 IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
2988 priv->_agn.agg_tids_count);
2989 }
2965 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 2990 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2966 return 0; 2991 return 0;
2967 else 2992 else
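
In the aggregation hunks above, priv->_agn.agg_tids_count is only adjusted when iwlagn_tx_agg_start()/iwlagn_tx_agg_stop() return 0, so it tracks the number of TIDs with a live TX aggregation session. A standalone sketch of the same guarded-counter pattern; the helpers are stand-ins, not the driver's:

    #include <stdio.h>

    static unsigned int agg_tids_count;

    /* Stand-in start/stop helpers; 0 means success, as in the driver. */
    static int agg_start(int tid) { return tid >= 0 ? 0 : -1; }
    static int agg_stop(int tid)  { return tid >= 0 ? 0 : -1; }

    int main(void)
    {
            if (agg_start(3) == 0)                  /* count only on success */
                    agg_tids_count++;
            if (agg_stop(3) == 0 && agg_tids_count > 0)
                    agg_tids_count--;               /* guard against underflow */
            printf("active agg TIDs: %u\n", agg_tids_count);
            return 0;
    }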
@@ -2998,18 +3023,7 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
2998 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 3023 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
2999 int sta_id; 3024 int sta_id;
3000 3025
3001 /*
3002 * TODO: We really should use this callback to
3003 * actually maintain the station table in
3004 * the device.
3005 */
3006
3007 switch (cmd) { 3026 switch (cmd) {
3008 case STA_NOTIFY_ADD:
3009 atomic_set(&sta_priv->pending_frames, 0);
3010 if (vif->type == NL80211_IFTYPE_AP)
3011 sta_priv->client = true;
3012 break;
3013 case STA_NOTIFY_SLEEP: 3027 case STA_NOTIFY_SLEEP:
3014 WARN_ON(!sta_priv->client); 3028 WARN_ON(!sta_priv->client);
3015 sta_priv->asleep = true; 3029 sta_priv->asleep = true;
@@ -3030,6 +3044,40 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
3030 } 3044 }
3031} 3045}
3032 3046
3047static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3048 struct ieee80211_vif *vif,
3049 struct ieee80211_sta *sta)
3050{
3051 struct iwl_priv *priv = hw->priv;
3052 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
3053 bool is_ap = priv->iw_mode == NL80211_IFTYPE_STATION;
3054 int ret;
3055 u8 sta_id;
3056
3057 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3058 sta->addr);
3059
3060 atomic_set(&sta_priv->pending_frames, 0);
3061 if (vif->type == NL80211_IFTYPE_AP)
3062 sta_priv->client = true;
3063
3064 ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
3065 &sta_id);
3066 if (ret) {
3067 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3068 sta->addr, ret);
3069 /* Should we return success if return code is EEXIST ? */
3070 return ret;
3071 }
3072
3073 /* Initialize rate scaling */
3074 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3075 sta->addr);
3076 iwl_rs_rate_init(priv, sta, sta_id);
3077
3078 return ret;
3079}
3080
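
iwlagn_mac_sta_add() takes over the work of the STA_NOTIFY_ADD case deleted above: mac80211's .sta_add callback now adds the station to the device table and initializes rate scaling, with a proper error return. A sketch of the wiring this enables (the real hookup is in the iwl_hw_ops hunk further down; the declaration below is illustrative, not a new definition):

    #include <net/mac80211.h>

    /* Sketch only: mac80211 calls .sta_add when a station entry is
     * created, so the driver no longer needs STA_NOTIFY_ADD. */
    static struct ieee80211_ops ops_sketch = {
            .sta_notify = iwl_mac_sta_notify,   /* now sleep/awake only */
            .sta_add    = iwlagn_mac_sta_add,   /* device add + rate-scale init */
            .sta_remove = iwl_mac_sta_remove,
    };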
3033/***************************************************************************** 3081/*****************************************************************************
3034 * 3082 *
3035 * sysfs attributes 3083 * sysfs attributes
@@ -3130,87 +3178,6 @@ static ssize_t store_tx_power(struct device *d,
3130 3178
3131static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power); 3179static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
3132 3180
3133static ssize_t show_flags(struct device *d,
3134 struct device_attribute *attr, char *buf)
3135{
3136 struct iwl_priv *priv = dev_get_drvdata(d);
3137
3138 return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
3139}
3140
3141static ssize_t store_flags(struct device *d,
3142 struct device_attribute *attr,
3143 const char *buf, size_t count)
3144{
3145 struct iwl_priv *priv = dev_get_drvdata(d);
3146 unsigned long val;
3147 u32 flags;
3148 int ret = strict_strtoul(buf, 0, &val);
3149 if (ret)
3150 return ret;
3151 flags = (u32)val;
3152
3153 mutex_lock(&priv->mutex);
3154 if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
3155 /* Cancel any currently running scans... */
3156 if (iwl_scan_cancel_timeout(priv, 100))
3157 IWL_WARN(priv, "Could not cancel scan.\n");
3158 else {
3159 IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags);
3160 priv->staging_rxon.flags = cpu_to_le32(flags);
3161 iwlcore_commit_rxon(priv);
3162 }
3163 }
3164 mutex_unlock(&priv->mutex);
3165
3166 return count;
3167}
3168
3169static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
3170
3171static ssize_t show_filter_flags(struct device *d,
3172 struct device_attribute *attr, char *buf)
3173{
3174 struct iwl_priv *priv = dev_get_drvdata(d);
3175
3176 return sprintf(buf, "0x%04X\n",
3177 le32_to_cpu(priv->active_rxon.filter_flags));
3178}
3179
3180static ssize_t store_filter_flags(struct device *d,
3181 struct device_attribute *attr,
3182 const char *buf, size_t count)
3183{
3184 struct iwl_priv *priv = dev_get_drvdata(d);
3185 unsigned long val;
3186 u32 filter_flags;
3187 int ret = strict_strtoul(buf, 0, &val);
3188 if (ret)
3189 return ret;
3190 filter_flags = (u32)val;
3191
3192 mutex_lock(&priv->mutex);
3193 if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
3194 /* Cancel any currently running scans... */
3195 if (iwl_scan_cancel_timeout(priv, 100))
3196 IWL_WARN(priv, "Could not cancel scan.\n");
3197 else {
3198 IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
3199 "0x%04X\n", filter_flags);
3200 priv->staging_rxon.filter_flags =
3201 cpu_to_le32(filter_flags);
3202 iwlcore_commit_rxon(priv);
3203 }
3204 }
3205 mutex_unlock(&priv->mutex);
3206
3207 return count;
3208}
3209
3210static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
3211 store_filter_flags);
3212
3213
3214static ssize_t show_statistics(struct device *d, 3181static ssize_t show_statistics(struct device *d,
3215 struct device_attribute *attr, char *buf) 3182 struct device_attribute *attr, char *buf)
3216{ 3183{
@@ -3316,6 +3283,13 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
3316 priv->ucode_trace.data = (unsigned long)priv; 3283 priv->ucode_trace.data = (unsigned long)priv;
3317 priv->ucode_trace.function = iwl_bg_ucode_trace; 3284 priv->ucode_trace.function = iwl_bg_ucode_trace;
3318 3285
3286 if (priv->cfg->ops->lib->recover_from_tx_stall) {
3287 init_timer(&priv->monitor_recover);
3288 priv->monitor_recover.data = (unsigned long)priv;
3289 priv->monitor_recover.function =
3290 priv->cfg->ops->lib->recover_from_tx_stall;
3291 }
3292
3319 if (!priv->cfg->use_isr_legacy) 3293 if (!priv->cfg->use_isr_legacy)
3320 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3294 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3321 iwl_irq_tasklet, (unsigned long)priv); 3295 iwl_irq_tasklet, (unsigned long)priv);
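
The deferred-work hunk above arms the recovery timer only when the per-device config supplies a recover_from_tx_stall handler, and the cancel hunk below applies the same test before del_timer_sync(). A minimal kernel-style sketch of that conditional init/teardown pairing, assuming the timer API of this era (init_timer with .function/.data); the struct is hypothetical:

    #include <linux/timer.h>

    struct dev_sketch {
            struct timer_list recover;
            void (*recover_fn)(unsigned long data);     /* optional hook */
    };

    static void setup_sketch(struct dev_sketch *d)
    {
            if (d->recover_fn) {            /* only some configs provide it */
                    init_timer(&d->recover);
                    d->recover.data = (unsigned long)d;
                    d->recover.function = d->recover_fn;
            }
    }

    static void teardown_sketch(struct dev_sketch *d)
    {
            if (d->recover_fn)              /* mirror the guard on teardown */
                    del_timer_sync(&d->recover);
    }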
@@ -3336,6 +3310,8 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
3336 cancel_work_sync(&priv->beacon_update); 3310 cancel_work_sync(&priv->beacon_update);
3337 del_timer_sync(&priv->statistics_periodic); 3311 del_timer_sync(&priv->statistics_periodic);
3338 del_timer_sync(&priv->ucode_trace); 3312 del_timer_sync(&priv->ucode_trace);
3313 if (priv->cfg->ops->lib->recover_from_tx_stall)
3314 del_timer_sync(&priv->monitor_recover);
3339} 3315}
3340 3316
3341static void iwl_init_hw_rates(struct iwl_priv *priv, 3317static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3373,9 +3349,6 @@ static int iwl_init_drv(struct iwl_priv *priv)
3373 mutex_init(&priv->mutex); 3349 mutex_init(&priv->mutex);
3374 mutex_init(&priv->sync_cmd_mutex); 3350 mutex_init(&priv->sync_cmd_mutex);
3375 3351
3376 /* Clear the driver's (not device's) station table */
3377 iwl_clear_stations_table(priv);
3378
3379 priv->ieee_channels = NULL; 3352 priv->ieee_channels = NULL;
3380 priv->ieee_rates = NULL; 3353 priv->ieee_rates = NULL;
3381 priv->band = IEEE80211_BAND_2GHZ; 3354 priv->band = IEEE80211_BAND_2GHZ;
@@ -3383,6 +3356,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
3383 priv->iw_mode = NL80211_IFTYPE_STATION; 3356 priv->iw_mode = NL80211_IFTYPE_STATION;
3384 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC; 3357 priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
3385 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; 3358 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3359 priv->_agn.agg_tids_count = 0;
3386 3360
3387 /* initialize force reset */ 3361 /* initialize force reset */
3388 priv->force_reset[IWL_RF_RESET].reset_duration = 3362 priv->force_reset[IWL_RF_RESET].reset_duration =
@@ -3396,16 +3370,10 @@ static int iwl_init_drv(struct iwl_priv *priv)
3396 3370
3397 iwl_init_scan_params(priv); 3371 iwl_init_scan_params(priv);
3398 3372
3399 iwl_reset_qos(priv);
3400
3401 priv->qos_data.qos_active = 0;
3402 priv->qos_data.qos_cap.val = 0;
3403
3404 priv->rates_mask = IWL_RATES_MASK;
3405 /* Set the tx_power_user_lmt to the lowest power level 3373 /* Set the tx_power_user_lmt to the lowest power level
3406 * this value will get overwritten by channel max power avg 3374 * this value will get overwritten by channel max power avg
3407 * from eeprom */ 3375 * from eeprom */
3408 priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN; 3376 priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
3409 3377
3410 ret = iwl_init_channel_map(priv); 3378 ret = iwl_init_channel_map(priv);
3411 if (ret) { 3379 if (ret) {
@@ -3437,8 +3405,6 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
3437} 3405}
3438 3406
3439static struct attribute *iwl_sysfs_entries[] = { 3407static struct attribute *iwl_sysfs_entries[] = {
3440 &dev_attr_flags.attr,
3441 &dev_attr_filter_flags.attr,
3442 &dev_attr_statistics.attr, 3408 &dev_attr_statistics.attr,
3443 &dev_attr_temperature.attr, 3409 &dev_attr_temperature.attr,
3444 &dev_attr_tx_power.attr, 3410 &dev_attr_tx_power.attr,
@@ -3471,6 +3437,8 @@ static struct ieee80211_ops iwl_hw_ops = {
3471 .ampdu_action = iwl_mac_ampdu_action, 3437 .ampdu_action = iwl_mac_ampdu_action,
3472 .hw_scan = iwl_mac_hw_scan, 3438 .hw_scan = iwl_mac_hw_scan,
3473 .sta_notify = iwl_mac_sta_notify, 3439 .sta_notify = iwl_mac_sta_notify,
3440 .sta_add = iwlagn_mac_sta_add,
3441 .sta_remove = iwl_mac_sta_remove,
3474}; 3442};
3475 3443
3476static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 3444static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -3574,7 +3542,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3574 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET); 3542 iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
3575 3543
3576 iwl_hw_detect(priv); 3544 iwl_hw_detect(priv);
3577 IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n", 3545 IWL_INFO(priv, "Detected %s, REV=0x%X\n",
3578 priv->cfg->name, priv->hw_rev); 3546 priv->cfg->name, priv->hw_rev);
3579 3547
3580 /* We disable the RETRY_TIMEOUT register (0x41) to keep 3548 /* We disable the RETRY_TIMEOUT register (0x41) to keep
@@ -3752,10 +3720,9 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3752 iwl_dealloc_ucode_pci(priv); 3720 iwl_dealloc_ucode_pci(priv);
3753 3721
3754 if (priv->rxq.bd) 3722 if (priv->rxq.bd)
3755 iwl_rx_queue_free(priv, &priv->rxq); 3723 iwlagn_rx_queue_free(priv, &priv->rxq);
3756 iwl_hw_txq_ctx_free(priv); 3724 iwlagn_hw_txq_ctx_free(priv);
3757 3725
3758 iwl_clear_stations_table(priv);
3759 iwl_eeprom_free(priv); 3726 iwl_eeprom_free(priv);
3760 3727
3761 3728
@@ -3869,6 +3836,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
3869 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)}, 3836 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
3870 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, 3837 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
3871 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, 3838 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
3839 {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000i_g2_2agn_cfg)},
3872 3840
3873/* 6x50 WiFi/WiMax Series */ 3841/* 6x50 WiFi/WiMax Series */
3874 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, 3842 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
@@ -3951,3 +3919,33 @@ module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
3951MODULE_PARM_DESC(debug, "debug output mask"); 3919MODULE_PARM_DESC(debug, "debug output mask");
3952#endif 3920#endif
3953 3921
3922module_param_named(swcrypto50, iwlagn_mod_params.sw_crypto, bool, S_IRUGO);
3923MODULE_PARM_DESC(swcrypto50,
3924 "using crypto in software (default 0 [hardware]) (deprecated)");
3925module_param_named(swcrypto, iwlagn_mod_params.sw_crypto, int, S_IRUGO);
3926MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
3927module_param_named(queues_num50,
3928 iwlagn_mod_params.num_of_queues, int, S_IRUGO);
3929MODULE_PARM_DESC(queues_num50,
3930 "number of hw queues in 50xx series (deprecated)");
3931module_param_named(queues_num, iwlagn_mod_params.num_of_queues, int, S_IRUGO);
3932MODULE_PARM_DESC(queues_num, "number of hw queues.");
3933module_param_named(11n_disable50, iwlagn_mod_params.disable_11n, int, S_IRUGO);
3934MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality (deprecated)");
3935module_param_named(11n_disable, iwlagn_mod_params.disable_11n, int, S_IRUGO);
3936MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
3937module_param_named(amsdu_size_8K50, iwlagn_mod_params.amsdu_size_8K,
3938 int, S_IRUGO);
3939MODULE_PARM_DESC(amsdu_size_8K50,
3940 "enable 8K amsdu size in 50XX series (deprecated)");
3941module_param_named(amsdu_size_8K, iwlagn_mod_params.amsdu_size_8K,
3942 int, S_IRUGO);
3943MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
3944module_param_named(fw_restart50, iwlagn_mod_params.restart_fw, int, S_IRUGO);
3945MODULE_PARM_DESC(fw_restart50,
3946 "restart firmware in case of error (deprecated)");
3947module_param_named(fw_restart, iwlagn_mod_params.restart_fw, int, S_IRUGO);
3948MODULE_PARM_DESC(fw_restart, "restart firmware in case of error");
3949module_param_named(
3950 disable_hw_scan, iwlagn_mod_params.disable_hw_scan, int, S_IRUGO);
3951MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
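
Each deprecated "*50" parameter above is registered against the same backing field as its new spelling, so either name writes one variable. A minimal kernel-module sketch of that aliasing, with a stand-in for iwlagn_mod_params; note the original even mixes types on one field (swcrypto50 as bool, swcrypto as int):

    #include <linux/module.h>

    static struct { int sw_crypto; } mod_params;    /* stand-in struct */

    /* Two parameter names, one storage location. */
    module_param_named(swcrypto,   mod_params.sw_crypto, int, S_IRUGO);
    module_param_named(swcrypto50, mod_params.sw_crypto, int, S_IRUGO);
    MODULE_PARM_DESC(swcrypto,   "use software crypto (default 0)");
    MODULE_PARM_DESC(swcrypto50, "deprecated alias of swcrypto");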
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
new file mode 100644
index 000000000000..5d3142287e14
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -0,0 +1,174 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_agn_h__
64#define __iwl_agn_h__
65
66#include "iwl-dev.h"
67
68extern struct iwl_mod_params iwlagn_mod_params;
69extern struct iwl_ucode_ops iwlagn_ucode;
70extern struct iwl_hcmd_ops iwlagn_hcmd;
71extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
72
73int iwl_reset_ict(struct iwl_priv *priv);
74void iwl_disable_ict(struct iwl_priv *priv);
75int iwl_alloc_isr_ict(struct iwl_priv *priv);
76void iwl_free_isr_ict(struct iwl_priv *priv);
77irqreturn_t iwl_isr_ict(int irq, void *data);
78bool iwl_good_ack_health(struct iwl_priv *priv,
79 struct iwl_rx_packet *pkt);
80
81/* tx queue */
82void iwlagn_set_wr_ptrs(struct iwl_priv *priv,
83 int txq_id, u32 index);
84void iwlagn_tx_queue_set_status(struct iwl_priv *priv,
85 struct iwl_tx_queue *txq,
86 int tx_fifo_id, int scd_retry);
87void iwlagn_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
88 struct iwl_tx_queue *txq,
89 u16 byte_cnt);
90void iwlagn_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
91 struct iwl_tx_queue *txq);
92int iwlagn_txq_agg_enable(struct iwl_priv *priv, int txq_id,
93 int tx_fifo, int sta_id, int tid, u16 ssn_idx);
94int iwlagn_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
95 u16 ssn_idx, u8 tx_fifo);
96void iwlagn_txq_set_sched(struct iwl_priv *priv, u32 mask);
97
98/* uCode */
99int iwlagn_load_ucode(struct iwl_priv *priv);
100void iwlagn_rx_calib_result(struct iwl_priv *priv,
101 struct iwl_rx_mem_buffer *rxb);
102void iwlagn_rx_calib_complete(struct iwl_priv *priv,
103 struct iwl_rx_mem_buffer *rxb);
104void iwlagn_init_alive_start(struct iwl_priv *priv);
105int iwlagn_alive_notify(struct iwl_priv *priv);
106
107/* lib */
108void iwl_check_abort_status(struct iwl_priv *priv,
109 u8 frame_count, u32 status);
110void iwlagn_rx_handler_setup(struct iwl_priv *priv);
111void iwlagn_setup_deferred_work(struct iwl_priv *priv);
112int iwlagn_hw_valid_rtc_data_addr(u32 addr);
113int iwlagn_send_tx_power(struct iwl_priv *priv);
114void iwlagn_temperature(struct iwl_priv *priv);
115u16 iwlagn_eeprom_calib_version(struct iwl_priv *priv);
116const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
117 size_t offset);
118void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
119int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
120int iwlagn_hw_nic_init(struct iwl_priv *priv);
121
122/* rx */
123void iwlagn_rx_queue_restock(struct iwl_priv *priv);
124void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority);
125void iwlagn_rx_replenish(struct iwl_priv *priv);
126void iwlagn_rx_replenish_now(struct iwl_priv *priv);
127void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
128int iwlagn_rxq_stop(struct iwl_priv *priv);
129int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
130void iwlagn_rx_reply_rx(struct iwl_priv *priv,
131 struct iwl_rx_mem_buffer *rxb);
132void iwlagn_rx_reply_rx_phy(struct iwl_priv *priv,
133 struct iwl_rx_mem_buffer *rxb);
134
135/* tx */
136void iwlagn_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
137 struct ieee80211_tx_info *info);
138int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
139int iwlagn_tx_agg_start(struct iwl_priv *priv,
140 const u8 *ra, u16 tid, u16 *ssn);
141int iwlagn_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
142int iwlagn_txq_check_empty(struct iwl_priv *priv,
143 int sta_id, u8 tid, int txq_id);
144void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
145 struct iwl_rx_mem_buffer *rxb);
146int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
147void iwlagn_hw_txq_ctx_free(struct iwl_priv *priv);
148int iwlagn_txq_ctx_alloc(struct iwl_priv *priv);
149void iwlagn_txq_ctx_reset(struct iwl_priv *priv);
150void iwlagn_txq_ctx_stop(struct iwl_priv *priv);
151
152static inline u32 iwl_tx_status_to_mac80211(u32 status)
153{
154 status &= TX_STATUS_MSK;
155
156 switch (status) {
157 case TX_STATUS_SUCCESS:
158 case TX_STATUS_DIRECT_DONE:
159 return IEEE80211_TX_STAT_ACK;
160 case TX_STATUS_FAIL_DEST_PS:
161 return IEEE80211_TX_STAT_TX_FILTERED;
162 default:
163 return 0;
164 }
165}
166
167static inline bool iwl_is_tx_success(u32 status)
168{
169 status &= TX_STATUS_MSK;
170 return (status == TX_STATUS_SUCCESS) ||
171 (status == TX_STATUS_DIRECT_DONE);
172}
173
174#endif /* __iwl_agn_h__ */
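
The two inline helpers moved into iwl-agn.h translate a uCode TX status word into mac80211 reporting flags. A standalone illustration of how a TX-reply path might consume the mapping, with the constants stubbed so it compiles outside the driver (status values match the enum in iwl-commands.h; the flag bits here are placeholders, not mac80211's):

    #include <stdio.h>
    #include <stdint.h>

    #define TX_STATUS_MSK            0x000000ff  /* low byte carries status */
    #define TX_STATUS_SUCCESS        0x01
    #define TX_STATUS_DIRECT_DONE    0x02
    #define TX_STATUS_FAIL_DEST_PS   0x88
    #define TX_STAT_ACK              (1u << 0)   /* placeholder flag bits */
    #define TX_STAT_TX_FILTERED      (1u << 1)

    static uint32_t tx_status_to_flags(uint32_t status)
    {
            status &= TX_STATUS_MSK;
            switch (status) {
            case TX_STATUS_SUCCESS:
            case TX_STATUS_DIRECT_DONE:
                    return TX_STAT_ACK;
            case TX_STATUS_FAIL_DEST_PS:
                    return TX_STAT_TX_FILTERED;     /* peer was asleep */
            default:
                    return 0;                       /* no ack reported */
            }
    }

    int main(void)
    {
            /* High bits are ignored; a status byte of 0x88 reports as filtered. */
            printf("flags=%#x\n", tx_status_to_flags(0x1288));
            return 0;
    }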
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 8b516c5ff0bb..f1fd00b1a65d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -593,7 +593,7 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
593 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time); 593 IWL_DEBUG_CALIB(priv, "rx_enable_time = %u usecs\n", rx_enable_time);
594 594
595 if (!rx_enable_time) { 595 if (!rx_enable_time) {
596 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0! \n"); 596 IWL_DEBUG_CALIB(priv, "<< RX Enable Time == 0!\n");
597 return; 597 return;
598 } 598 }
599 599
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 6383d9f8c9b3..d830086ca195 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -106,7 +106,7 @@ enum {
106 REPLY_TX = 0x1c, 106 REPLY_TX = 0x1c,
107 REPLY_RATE_SCALE = 0x47, /* 3945 only */ 107 REPLY_RATE_SCALE = 0x47, /* 3945 only */
108 REPLY_LEDS_CMD = 0x48, 108 REPLY_LEDS_CMD = 0x48,
109 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ 109 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* for 4965 and up */
110 110
111 /* WiMAX coexistence */ 111 /* WiMAX coexistence */
112 COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */ 112 COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
@@ -512,8 +512,9 @@ struct iwl_init_alive_resp {
512 * 512 *
513 * Entries without timestamps contain only event_id and data. 513 * Entries without timestamps contain only event_id and data.
514 * 514 *
515 *
515 * 2) error_event_table_ptr indicates base of the error log. This contains 516 * 2) error_event_table_ptr indicates base of the error log. This contains
516 * information about any uCode error that occurs. For 4965, the format 517 * information about any uCode error that occurs. For agn, the format
517 * of the error log is: 518 * of the error log is:
518 * 519 *
519 * __le32 valid; (nonzero) valid, (0) log is empty 520 * __le32 valid; (nonzero) valid, (0) log is empty
@@ -529,6 +530,30 @@ struct iwl_init_alive_resp {
529 * __le32 bcon_time; beacon timer 530 * __le32 bcon_time; beacon timer
530 * __le32 tsf_low; network timestamp function timer 531 * __le32 tsf_low; network timestamp function timer
531 * __le32 tsf_hi; network timestamp function timer 532 * __le32 tsf_hi; network timestamp function timer
533 * __le32 gp1; GP1 timer register
534 * __le32 gp2; GP2 timer register
535 * __le32 gp3; GP3 timer register
536 * __le32 ucode_ver; uCode version
537 * __le32 hw_ver; HW Silicon version
538 * __le32 brd_ver; HW board version
539 * __le32 log_pc; log program counter
540 * __le32 frame_ptr; frame pointer
541 * __le32 stack_ptr; stack pointer
542 * __le32 hcmd; last host command
543 * __le32 isr0; isr status register LMPM_NIC_ISR0: rxtx_flag
544 * __le32 isr1; isr status register LMPM_NIC_ISR1: host_flag
545 * __le32 isr2; isr status register LMPM_NIC_ISR2: enc_flag
546 * __le32 isr3; isr status register LMPM_NIC_ISR3: time_flag
547 * __le32 isr4; isr status register LMPM_NIC_ISR4: wico interrupt
548 * __le32 isr_pref; isr status register LMPM_NIC_PREF_STAT
549 * __le32 wait_event; wait event() caller address
550 * __le32 l2p_control; L2pControlField
551 * __le32 l2p_duration; L2pDurationField
552 * __le32 l2p_mhvalid; L2pMhValidBits
553 * __le32 l2p_addr_match; L2pAddrMatchStat
554 * __le32 lmpm_pmg_sel; indicate which clocks are turned on (LMPM_PMG_SEL)
 555 * __le32 u_timestamp; indicates the date and time of the compilation
556 * __le32 reserved;
532 * 557 *
533 * The Linux driver can print both logs to the system log when a uCode error 558 * The Linux driver can print both logs to the system log when a uCode error
534 * occurs. 559 * occurs.
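
Reading the extended error-log comment back as a C struct makes the new offsets concrete. This is a sketch assembled from the comment above, not a struct the driver declares in this file; __le32 fields are shown as uint32_t:

    #include <stdint.h>

    struct error_log_tail_sketch {
            uint32_t gp1, gp2, gp3;     /* GP timer registers */
            uint32_t ucode_ver;         /* uCode version */
            uint32_t hw_ver;            /* HW silicon version */
            uint32_t brd_ver;           /* HW board version */
            uint32_t log_pc;            /* log program counter */
            uint32_t frame_ptr;
            uint32_t stack_ptr;
            uint32_t hcmd;              /* last host command */
            uint32_t isr0, isr1, isr2, isr3, isr4, isr_pref;
            uint32_t wait_event;        /* wait_event() caller address */
            uint32_t l2p_control;
            uint32_t l2p_duration;
            uint32_t l2p_mhvalid;
            uint32_t l2p_addr_match;
            uint32_t lmpm_pmg_sel;      /* which clocks are turned on */
            uint32_t u_timestamp;       /* build date and time */
            uint32_t reserved;
    };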
@@ -1637,7 +1662,7 @@ struct iwl_tx_cmd {
1637 struct ieee80211_hdr hdr[0]; 1662 struct ieee80211_hdr hdr[0];
1638} __attribute__ ((packed)); 1663} __attribute__ ((packed));
1639 1664
1640/* TX command response is sent after *all* transmission attempts. 1665/* TX command response is sent after *3945* transmission attempts.
1641 * 1666 *
1642 * NOTES: 1667 * NOTES:
1643 * 1668 *
@@ -1665,24 +1690,65 @@ struct iwl_tx_cmd {
1665 * control line. Receiving is still allowed in this case. 1690 * control line. Receiving is still allowed in this case.
1666 */ 1691 */
1667enum { 1692enum {
1693 TX_3945_STATUS_SUCCESS = 0x01,
1694 TX_3945_STATUS_DIRECT_DONE = 0x02,
1695 TX_3945_STATUS_FAIL_SHORT_LIMIT = 0x82,
1696 TX_3945_STATUS_FAIL_LONG_LIMIT = 0x83,
1697 TX_3945_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1698 TX_3945_STATUS_FAIL_MGMNT_ABORT = 0x85,
1699 TX_3945_STATUS_FAIL_NEXT_FRAG = 0x86,
1700 TX_3945_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1701 TX_3945_STATUS_FAIL_DEST_PS = 0x88,
1702 TX_3945_STATUS_FAIL_ABORTED = 0x89,
1703 TX_3945_STATUS_FAIL_BT_RETRY = 0x8a,
1704 TX_3945_STATUS_FAIL_STA_INVALID = 0x8b,
1705 TX_3945_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1706 TX_3945_STATUS_FAIL_TID_DISABLE = 0x8d,
1707 TX_3945_STATUS_FAIL_FRAME_FLUSHED = 0x8e,
1708 TX_3945_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1709 TX_3945_STATUS_FAIL_TX_LOCKED = 0x90,
1710 TX_3945_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91,
1711};
1712
1713/*
1714 * TX command response is sent after *agn* transmission attempts.
1715 *
 1716 * Both postpone and abort status are expected behavior from uCode. There is
 1717 * no special operation required from the driver, except for RFKILL_FLUSH,
 1718 * which requires a tx flush host command to flush all the tx frames in queues.
1719 */
1720enum {
1668 TX_STATUS_SUCCESS = 0x01, 1721 TX_STATUS_SUCCESS = 0x01,
1669 TX_STATUS_DIRECT_DONE = 0x02, 1722 TX_STATUS_DIRECT_DONE = 0x02,
1723 /* postpone TX */
1724 TX_STATUS_POSTPONE_DELAY = 0x40,
1725 TX_STATUS_POSTPONE_FEW_BYTES = 0x41,
1726 TX_STATUS_POSTPONE_BT_PRIO = 0x42,
1727 TX_STATUS_POSTPONE_QUIET_PERIOD = 0x43,
1728 TX_STATUS_POSTPONE_CALC_TTAK = 0x44,
1729 /* abort TX */
1730 TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY = 0x81,
1670 TX_STATUS_FAIL_SHORT_LIMIT = 0x82, 1731 TX_STATUS_FAIL_SHORT_LIMIT = 0x82,
1671 TX_STATUS_FAIL_LONG_LIMIT = 0x83, 1732 TX_STATUS_FAIL_LONG_LIMIT = 0x83,
1672 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84, 1733 TX_STATUS_FAIL_FIFO_UNDERRUN = 0x84,
1673 TX_STATUS_FAIL_MGMNT_ABORT = 0x85, 1734 TX_STATUS_FAIL_DRAIN_FLOW = 0x85,
1674 TX_STATUS_FAIL_NEXT_FRAG = 0x86, 1735 TX_STATUS_FAIL_RFKILL_FLUSH = 0x86,
1675 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87, 1736 TX_STATUS_FAIL_LIFE_EXPIRE = 0x87,
1676 TX_STATUS_FAIL_DEST_PS = 0x88, 1737 TX_STATUS_FAIL_DEST_PS = 0x88,
1677 TX_STATUS_FAIL_ABORTED = 0x89, 1738 TX_STATUS_FAIL_HOST_ABORTED = 0x89,
1678 TX_STATUS_FAIL_BT_RETRY = 0x8a, 1739 TX_STATUS_FAIL_BT_RETRY = 0x8a,
1679 TX_STATUS_FAIL_STA_INVALID = 0x8b, 1740 TX_STATUS_FAIL_STA_INVALID = 0x8b,
1680 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c, 1741 TX_STATUS_FAIL_FRAG_DROPPED = 0x8c,
1681 TX_STATUS_FAIL_TID_DISABLE = 0x8d, 1742 TX_STATUS_FAIL_TID_DISABLE = 0x8d,
1682 TX_STATUS_FAIL_FRAME_FLUSHED = 0x8e, 1743 TX_STATUS_FAIL_FIFO_FLUSHED = 0x8e,
1683 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f, 1744 TX_STATUS_FAIL_INSUFFICIENT_CF_POLL = 0x8f,
1684 TX_STATUS_FAIL_TX_LOCKED = 0x90, 1745 /* uCode drop due to FW drop request */
1685 TX_STATUS_FAIL_NO_BEACON_ON_RADAR = 0x91, 1746 TX_STATUS_FAIL_FW_DROP = 0x90,
1747 /*
1748 * uCode drop due to station color mismatch
1749 * between tx command and station table
1750 */
1751 TX_STATUS_FAIL_STA_COLOR_MISMATCH_DROP = 0x91,
1686}; 1752};
1687 1753
1688#define TX_PACKET_MODE_REGULAR 0x0000 1754#define TX_PACKET_MODE_REGULAR 0x0000
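
The new agn status codes fall into a postpone band (0x40-0x44) and an abort band (0x81-0x91), and per the comment above only RFKILL_FLUSH needs driver action. A standalone sketch that classifies a status byte into those bands, with the boundaries taken from the enum:

    #include <stdio.h>
    #include <stdint.h>

    enum tx_class { TX_OK, TX_POSTPONED, TX_ABORTED };

    static enum tx_class classify(uint32_t status)
    {
            status &= 0xff;                         /* TX_STATUS_MSK */
            if (status == 0x01 || status == 0x02)   /* SUCCESS/DIRECT_DONE */
                    return TX_OK;
            if (status >= 0x40 && status <= 0x44)   /* postpone band */
                    return TX_POSTPONED;
            return TX_ABORTED;                      /* 0x81..0x91 abort band */
    }

    int main(void)
    {
            /* success, POSTPONE_BT_PRIO, FAIL_RFKILL_FLUSH */
            printf("%d %d %d\n", classify(0x01), classify(0x42), classify(0x86));
            return 0;
    }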
@@ -1704,30 +1770,6 @@ enum {
1704 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */ 1770 TX_ABORT_REQUIRED_MSK = 0x80000000, /* bits 31:31 */
1705}; 1771};
1706 1772
1707static inline u32 iwl_tx_status_to_mac80211(u32 status)
1708{
1709 status &= TX_STATUS_MSK;
1710
1711 switch (status) {
1712 case TX_STATUS_SUCCESS:
1713 case TX_STATUS_DIRECT_DONE:
1714 return IEEE80211_TX_STAT_ACK;
1715 case TX_STATUS_FAIL_DEST_PS:
1716 return IEEE80211_TX_STAT_TX_FILTERED;
1717 default:
1718 return 0;
1719 }
1720}
1721
1722static inline bool iwl_is_tx_success(u32 status)
1723{
1724 status &= TX_STATUS_MSK;
1725 return (status == TX_STATUS_SUCCESS) ||
1726 (status == TX_STATUS_DIRECT_DONE);
1727}
1728
1729
1730
1731/* ******************************* 1773/* *******************************
1732 * TX aggregation status 1774 * TX aggregation status
1733 ******************************* */ 1775 ******************************* */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 049b652bcb5e..b75808aad1ef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -66,7 +66,7 @@ MODULE_LICENSE("GPL");
66 */ 66 */
67static bool bt_coex_active = true; 67static bool bt_coex_active = true;
68module_param(bt_coex_active, bool, S_IRUGO); 68module_param(bt_coex_active, bool, S_IRUGO);
69MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n"); 69MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist");
70 70
71static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { 71static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
72 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP, 72 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
@@ -115,8 +115,6 @@ static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
115u32 iwl_debug_level; 115u32 iwl_debug_level;
116EXPORT_SYMBOL(iwl_debug_level); 116EXPORT_SYMBOL(iwl_debug_level);
117 117
118static irqreturn_t iwl_isr(int irq, void *data);
119
120/* 118/*
121 * Parameter order: 119 * Parameter order:
122 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate 120 * rate, ht rate, prev rate, next rate, prev tgg rate, next tgg rate
@@ -143,30 +141,6 @@ const struct iwl_rate_info iwl_rates[IWL_RATE_COUNT] = {
143}; 141};
144EXPORT_SYMBOL(iwl_rates); 142EXPORT_SYMBOL(iwl_rates);
145 143
146/**
147 * translate ucode response to mac80211 tx status control values
148 */
149void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
150 struct ieee80211_tx_info *info)
151{
152 struct ieee80211_tx_rate *r = &info->control.rates[0];
153
154 info->antenna_sel_tx =
155 ((rate_n_flags & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS);
156 if (rate_n_flags & RATE_MCS_HT_MSK)
157 r->flags |= IEEE80211_TX_RC_MCS;
158 if (rate_n_flags & RATE_MCS_GF_MSK)
159 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
160 if (rate_n_flags & RATE_MCS_HT40_MSK)
161 r->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
162 if (rate_n_flags & RATE_MCS_DUP_MSK)
163 r->flags |= IEEE80211_TX_RC_DUP_DATA;
164 if (rate_n_flags & RATE_MCS_SGI_MSK)
165 r->flags |= IEEE80211_TX_RC_SHORT_GI;
166 r->idx = iwl_hwrate_to_mac80211_idx(rate_n_flags, info->band);
167}
168EXPORT_SYMBOL(iwl_hwrate_to_tx_control);
169
170int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) 144int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
171{ 145{
172 int idx = 0; 146 int idx = 0;
@@ -198,27 +172,6 @@ int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
198} 172}
199EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx); 173EXPORT_SYMBOL(iwl_hwrate_to_plcp_idx);
200 174
201int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
202{
203 int idx = 0;
204 int band_offset = 0;
205
206 /* HT rate format: mac80211 wants an MCS number, which is just LSB */
207 if (rate_n_flags & RATE_MCS_HT_MSK) {
208 idx = (rate_n_flags & 0xff);
209 return idx;
210 /* Legacy rate format, search for match in table */
211 } else {
212 if (band == IEEE80211_BAND_5GHZ)
213 band_offset = IWL_FIRST_OFDM_RATE;
214 for (idx = band_offset; idx < IWL_RATE_COUNT_LEGACY; idx++)
215 if (iwl_rates[idx].plcp == (rate_n_flags & 0xFF))
216 return idx - band_offset;
217 }
218
219 return -1;
220}
221
222u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant) 175u8 iwl_toggle_tx_ant(struct iwl_priv *priv, u8 ant)
223{ 176{
224 int i; 177 int i;
@@ -268,74 +221,16 @@ void iwl_hw_detect(struct iwl_priv *priv)
268} 221}
269EXPORT_SYMBOL(iwl_hw_detect); 222EXPORT_SYMBOL(iwl_hw_detect);
270 223
271int iwl_hw_nic_init(struct iwl_priv *priv)
272{
273 unsigned long flags;
274 struct iwl_rx_queue *rxq = &priv->rxq;
275 int ret;
276
277 /* nic_init */
278 spin_lock_irqsave(&priv->lock, flags);
279 priv->cfg->ops->lib->apm_ops.init(priv);
280
281 /* Set interrupt coalescing calibration timer to default (512 usecs) */
282 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
283
284 spin_unlock_irqrestore(&priv->lock, flags);
285
286 ret = priv->cfg->ops->lib->apm_ops.set_pwr_src(priv, IWL_PWR_SRC_VMAIN);
287
288 priv->cfg->ops->lib->apm_ops.config(priv);
289
290 /* Allocate the RX queue, or reset if it is already allocated */
291 if (!rxq->bd) {
292 ret = iwl_rx_queue_alloc(priv);
293 if (ret) {
294 IWL_ERR(priv, "Unable to initialize Rx queue\n");
295 return -ENOMEM;
296 }
297 } else
298 iwl_rx_queue_reset(priv, rxq);
299
300 iwl_rx_replenish(priv);
301
302 iwl_rx_init(priv, rxq);
303
304 spin_lock_irqsave(&priv->lock, flags);
305
306 rxq->need_update = 1;
307 iwl_rx_queue_update_write_ptr(priv, rxq);
308
309 spin_unlock_irqrestore(&priv->lock, flags);
310
311 /* Allocate or reset and init all Tx and Command queues */
312 if (!priv->txq) {
313 ret = iwl_txq_ctx_alloc(priv);
314 if (ret)
315 return ret;
316 } else
317 iwl_txq_ctx_reset(priv);
318
319 set_bit(STATUS_INIT, &priv->status);
320
321 return 0;
322}
323EXPORT_SYMBOL(iwl_hw_nic_init);
324
325/* 224/*
326 * QoS support 225 * QoS support
327*/ 226*/
328void iwl_activate_qos(struct iwl_priv *priv, u8 force) 227static void iwl_update_qos(struct iwl_priv *priv)
329{ 228{
330 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 229 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
331 return; 230 return;
332 231
333 priv->qos_data.def_qos_parm.qos_flags = 0; 232 priv->qos_data.def_qos_parm.qos_flags = 0;
334 233
335 if (priv->qos_data.qos_cap.q_AP.queue_request &&
336 !priv->qos_data.qos_cap.q_AP.txop_request)
337 priv->qos_data.def_qos_parm.qos_flags |=
338 QOS_PARAM_FLG_TXOP_TYPE_MSK;
339 if (priv->qos_data.qos_active) 234 if (priv->qos_data.qos_active)
340 priv->qos_data.def_qos_parm.qos_flags |= 235 priv->qos_data.def_qos_parm.qos_flags |=
341 QOS_PARAM_FLG_UPDATE_EDCA_MSK; 236 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
@@ -343,118 +238,14 @@ void iwl_activate_qos(struct iwl_priv *priv, u8 force)
343 if (priv->current_ht_config.is_ht) 238 if (priv->current_ht_config.is_ht)
344 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; 239 priv->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
345 240
346 if (force || iwl_is_associated(priv)) { 241 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
347 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n", 242 priv->qos_data.qos_active,
348 priv->qos_data.qos_active, 243 priv->qos_data.def_qos_parm.qos_flags);
349 priv->qos_data.def_qos_parm.qos_flags);
350 244
351 iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM, 245 iwl_send_cmd_pdu_async(priv, REPLY_QOS_PARAM,
352 sizeof(struct iwl_qosparam_cmd), 246 sizeof(struct iwl_qosparam_cmd),
353 &priv->qos_data.def_qos_parm, NULL); 247 &priv->qos_data.def_qos_parm, NULL);
354 }
355} 248}
356EXPORT_SYMBOL(iwl_activate_qos);
357
358/*
359 * AC CWmin CW max AIFSN TXOP Limit TXOP Limit
360 * (802.11b) (802.11a/g)
361 * AC_BK 15 1023 7 0 0
362 * AC_BE 15 1023 3 0 0
363 * AC_VI 7 15 2 6.016ms 3.008ms
364 * AC_VO 3 7 2 3.264ms 1.504ms
365 */
366void iwl_reset_qos(struct iwl_priv *priv)
367{
368 u16 cw_min = 15;
369 u16 cw_max = 1023;
370 u8 aifs = 2;
371 bool is_legacy = false;
372 unsigned long flags;
373 int i;
374
375 spin_lock_irqsave(&priv->lock, flags);
376 /* QoS always active in AP and ADHOC mode
377 * In STA mode wait for association
378 */
379 if (priv->iw_mode == NL80211_IFTYPE_ADHOC ||
380 priv->iw_mode == NL80211_IFTYPE_AP)
381 priv->qos_data.qos_active = 1;
382 else
383 priv->qos_data.qos_active = 0;
384
385 /* check for legacy mode */
386 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC &&
387 (priv->active_rate & IWL_OFDM_RATES_MASK) == 0) ||
388 (priv->iw_mode == NL80211_IFTYPE_STATION &&
389 (priv->staging_rxon.flags & RXON_FLG_SHORT_SLOT_MSK) == 0)) {
390 cw_min = 31;
391 is_legacy = 1;
392 }
393
394 if (priv->qos_data.qos_active)
395 aifs = 3;
396
397 /* AC_BE */
398 priv->qos_data.def_qos_parm.ac[0].cw_min = cpu_to_le16(cw_min);
399 priv->qos_data.def_qos_parm.ac[0].cw_max = cpu_to_le16(cw_max);
400 priv->qos_data.def_qos_parm.ac[0].aifsn = aifs;
401 priv->qos_data.def_qos_parm.ac[0].edca_txop = 0;
402 priv->qos_data.def_qos_parm.ac[0].reserved1 = 0;
403
404 if (priv->qos_data.qos_active) {
405 /* AC_BK */
406 i = 1;
407 priv->qos_data.def_qos_parm.ac[i].cw_min = cpu_to_le16(cw_min);
408 priv->qos_data.def_qos_parm.ac[i].cw_max = cpu_to_le16(cw_max);
409 priv->qos_data.def_qos_parm.ac[i].aifsn = 7;
410 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
411 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
412
413 /* AC_VI */
414 i = 2;
415 priv->qos_data.def_qos_parm.ac[i].cw_min =
416 cpu_to_le16((cw_min + 1) / 2 - 1);
417 priv->qos_data.def_qos_parm.ac[i].cw_max =
418 cpu_to_le16(cw_min);
419 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
420 if (is_legacy)
421 priv->qos_data.def_qos_parm.ac[i].edca_txop =
422 cpu_to_le16(6016);
423 else
424 priv->qos_data.def_qos_parm.ac[i].edca_txop =
425 cpu_to_le16(3008);
426 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
427
428 /* AC_VO */
429 i = 3;
430 priv->qos_data.def_qos_parm.ac[i].cw_min =
431 cpu_to_le16((cw_min + 1) / 4 - 1);
432 priv->qos_data.def_qos_parm.ac[i].cw_max =
433 cpu_to_le16((cw_min + 1) / 2 - 1);
434 priv->qos_data.def_qos_parm.ac[i].aifsn = 2;
435 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
436 if (is_legacy)
437 priv->qos_data.def_qos_parm.ac[i].edca_txop =
438 cpu_to_le16(3264);
439 else
440 priv->qos_data.def_qos_parm.ac[i].edca_txop =
441 cpu_to_le16(1504);
442 } else {
443 for (i = 1; i < 4; i++) {
444 priv->qos_data.def_qos_parm.ac[i].cw_min =
445 cpu_to_le16(cw_min);
446 priv->qos_data.def_qos_parm.ac[i].cw_max =
447 cpu_to_le16(cw_max);
448 priv->qos_data.def_qos_parm.ac[i].aifsn = aifs;
449 priv->qos_data.def_qos_parm.ac[i].edca_txop = 0;
450 priv->qos_data.def_qos_parm.ac[i].reserved1 = 0;
451 }
452 }
453 IWL_DEBUG_QOS(priv, "set QoS to default \n");
454
455 spin_unlock_irqrestore(&priv->lock, flags);
456}
457EXPORT_SYMBOL(iwl_reset_qos);
458 249
459#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 250#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
460#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 251#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
@@ -903,23 +694,10 @@ EXPORT_SYMBOL(iwl_full_rxon_required);
903 694
904u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv) 695u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
905{ 696{
906 int i; 697 /*
907 int rate_mask; 698 * Assign the lowest rate -- should really get this from
908 699 * the beacon skb from mac80211.
909 /* Set rate mask*/ 700 */
910 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
911 rate_mask = priv->active_rate_basic & IWL_CCK_RATES_MASK;
912 else
913 rate_mask = priv->active_rate_basic & IWL_OFDM_RATES_MASK;
914
915 /* Find lowest valid rate */
916 for (i = IWL_RATE_1M_INDEX; i != IWL_RATE_INVALID;
917 i = iwl_rates[i].next_ieee) {
918 if (rate_mask & (1 << i))
919 return iwl_rates[i].plcp;
920 }
921
922 /* No valid rate was found. Assign the lowest one */
923 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) 701 if (priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK)
924 return IWL_RATE_1M_PLCP; 702 return IWL_RATE_1M_PLCP;
925 else 703 else
@@ -1107,12 +885,12 @@ void iwl_set_rxon_chain(struct iwl_priv *priv)
1107 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS; 885 rx_chain |= idle_rx_cnt << RXON_RX_CHAIN_CNT_POS;
1108 886
1109 /* copied from 'iwl_bg_request_scan()' */ 887 /* copied from 'iwl_bg_request_scan()' */
1110 /* Force use of chains B and C (0x6) for Rx for 4965 888 /* Force use of chains B and C (0x6) for Rx
1111 * Avoid A (0x1) because of its off-channel reception on A-band. 889 * Avoid A (0x1) because the device has off-channel reception on A-band.
1112 * MIMO is not used here, but value is required */ 890 * MIMO is not used here, but value is required */
1113 if (iwl_is_monitor_mode(priv) && 891 if (iwl_is_monitor_mode(priv) &&
1114 !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) && 892 !(priv->staging_rxon.flags & RXON_FLG_BAND_24G_MSK) &&
1115 ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)) { 893 priv->cfg->off_channel_workaround) {
1116 rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS; 894 rx_chain = ANT_ABC << RXON_RX_CHAIN_VALID_POS;
1117 rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS; 895 rx_chain |= ANT_BC << RXON_RX_CHAIN_FORCE_SEL_POS;
1118 rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS; 896 rx_chain |= ANT_ABC << RXON_RX_CHAIN_FORCE_MIMO_SEL_POS;
@@ -1244,14 +1022,6 @@ void iwl_connection_init_rx_config(struct iwl_priv *priv, int mode)
1244 if (!ch_info) 1022 if (!ch_info)
1245 ch_info = &priv->channel_info[0]; 1023 ch_info = &priv->channel_info[0];
1246 1024
1247 /*
1248 * in some case A channels are all non IBSS
1249 * in this case force B/G channel
1250 */
1251 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
1252 !(is_channel_ibss(ch_info)))
1253 ch_info = &priv->channel_info[0];
1254
1255 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel); 1025 priv->staging_rxon.channel = cpu_to_le16(ch_info->channel);
1256 priv->band = ch_info->band; 1026 priv->band = ch_info->band;
1257 1027
@@ -1286,7 +1056,6 @@ static void iwl_set_rate(struct iwl_priv *priv)
1286 } 1056 }
1287 1057
1288 priv->active_rate = 0; 1058 priv->active_rate = 0;
1289 priv->active_rate_basic = 0;
1290 1059
1291 for (i = 0; i < hw->n_bitrates; i++) { 1060 for (i = 0; i < hw->n_bitrates; i++) {
1292 rate = &(hw->bitrates[i]); 1061 rate = &(hw->bitrates[i]);
@@ -1294,30 +1063,13 @@ static void iwl_set_rate(struct iwl_priv *priv)
1294 priv->active_rate |= (1 << rate->hw_value); 1063 priv->active_rate |= (1 << rate->hw_value);
1295 } 1064 }
1296 1065
1297 IWL_DEBUG_RATE(priv, "Set active_rate = %0x, active_rate_basic = %0x\n", 1066 IWL_DEBUG_RATE(priv, "Set active_rate = %0x\n", priv->active_rate);
1298 priv->active_rate, priv->active_rate_basic);
1299 1067
1300 /* 1068 priv->staging_rxon.cck_basic_rates =
1301 * If a basic rate is configured, then use it (adding IWL_RATE_1M_MASK) 1069 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1302 * otherwise set it to the default of all CCK rates and 6, 12, 24 for 1070
1303 * OFDM 1071 priv->staging_rxon.ofdm_basic_rates =
1304 */ 1072 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1305 if (priv->active_rate_basic & IWL_CCK_BASIC_RATES_MASK)
1306 priv->staging_rxon.cck_basic_rates =
1307 ((priv->active_rate_basic &
1308 IWL_CCK_RATES_MASK) >> IWL_FIRST_CCK_RATE) & 0xF;
1309 else
1310 priv->staging_rxon.cck_basic_rates =
1311 (IWL_CCK_BASIC_RATES_MASK >> IWL_FIRST_CCK_RATE) & 0xF;
1312
1313 if (priv->active_rate_basic & IWL_OFDM_BASIC_RATES_MASK)
1314 priv->staging_rxon.ofdm_basic_rates =
1315 ((priv->active_rate_basic &
1316 (IWL_OFDM_BASIC_RATES_MASK | IWL_RATE_6M_MASK)) >>
1317 IWL_FIRST_OFDM_RATE) & 0xFF;
1318 else
1319 priv->staging_rxon.ofdm_basic_rates =
1320 (IWL_OFDM_BASIC_RATES_MASK >> IWL_FIRST_OFDM_RATE) & 0xFF;
1321} 1073}
1322 1074
1323void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 1075void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
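
iwl_set_rate() now always programs the fixed CCK and OFDM basic-rate masks rather than deriving them from the removed active_rate_basic. A standalone check of the shift-and-mask arithmetic, assuming the usual iwlwifi rate layout (CCK rates in bits 0-3, OFDM in bits 4-11); the mask values below are placeholders, not the driver's constants:

    #include <stdio.h>
    #include <stdint.h>

    #define FIRST_CCK_RATE   0
    #define FIRST_OFDM_RATE  4
    #define CCK_BASIC_MASK   0x0003    /* e.g. 1M + 2M */
    #define OFDM_BASIC_MASK  0x0150    /* e.g. 6M + 12M + 24M */

    int main(void)
    {
            /* RXON wants each band's bitmap rebased to its first rate. */
            uint8_t cck  = (CCK_BASIC_MASK  >> FIRST_CCK_RATE)  & 0xF;
            uint8_t ofdm = (OFDM_BASIC_MASK >> FIRST_OFDM_RATE) & 0xFF;
            printf("cck=0x%x ofdm=0x%x\n", cck, ofdm);   /* cck=0x3 ofdm=0x15 */
            return 0;
    }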
@@ -1401,7 +1153,7 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1401} 1153}
1402EXPORT_SYMBOL(iwl_irq_handle_error); 1154EXPORT_SYMBOL(iwl_irq_handle_error);
1403 1155
1404int iwl_apm_stop_master(struct iwl_priv *priv) 1156static int iwl_apm_stop_master(struct iwl_priv *priv)
1405{ 1157{
1406 int ret = 0; 1158 int ret = 0;
1407 1159
@@ -1417,7 +1169,6 @@ int iwl_apm_stop_master(struct iwl_priv *priv)
1417 1169
1418 return ret; 1170 return ret;
1419} 1171}
1420EXPORT_SYMBOL(iwl_apm_stop_master);
1421 1172
1422void iwl_apm_stop(struct iwl_priv *priv) 1173void iwl_apm_stop(struct iwl_priv *priv)
1423{ 1174{
@@ -1626,10 +1377,11 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1626 int ret = 0; 1377 int ret = 0;
1627 s8 prev_tx_power = priv->tx_power_user_lmt; 1378 s8 prev_tx_power = priv->tx_power_user_lmt;
1628 1379
1629 if (tx_power < IWL_TX_POWER_TARGET_POWER_MIN) { 1380 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
1630 IWL_WARN(priv, "Requested user TXPOWER %d below lower limit %d.\n", 1381 IWL_WARN(priv,
1382 "Requested user TXPOWER %d below lower limit %d.\n",
1631 tx_power, 1383 tx_power,
1632 IWL_TX_POWER_TARGET_POWER_MIN); 1384 IWLAGN_TX_POWER_TARGET_POWER_MIN);
1633 return -EINVAL; 1385 return -EINVAL;
1634 } 1386 }
1635 1387
@@ -1668,286 +1420,16 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1668} 1420}
1669EXPORT_SYMBOL(iwl_set_tx_power); 1421EXPORT_SYMBOL(iwl_set_tx_power);
1670 1422
1671#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
1672
1673/* Free dram table */
1674void iwl_free_isr_ict(struct iwl_priv *priv)
1675{
1676 if (priv->ict_tbl_vir) {
1677 dma_free_coherent(&priv->pci_dev->dev,
1678 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1679 priv->ict_tbl_vir, priv->ict_tbl_dma);
1680 priv->ict_tbl_vir = NULL;
1681 }
1682}
1683EXPORT_SYMBOL(iwl_free_isr_ict);
1684
1685
1686/* allocate dram shared table it is a PAGE_SIZE aligned
1687 * also reset all data related to ICT table interrupt.
1688 */
1689int iwl_alloc_isr_ict(struct iwl_priv *priv)
1690{
1691
1692 if (priv->cfg->use_isr_legacy)
1693 return 0;
 1694 /* allocate shared data table */
1695 priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
1696 (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
1697 &priv->ict_tbl_dma, GFP_KERNEL);
1698 if (!priv->ict_tbl_vir)
1699 return -ENOMEM;
1700
 1701 /* align table to PAGE_SIZE boundary */
1702 priv->aligned_ict_tbl_dma = ALIGN(priv->ict_tbl_dma, PAGE_SIZE);
1703
1704 IWL_DEBUG_ISR(priv, "ict dma addr %Lx dma aligned %Lx diff %d\n",
1705 (unsigned long long)priv->ict_tbl_dma,
1706 (unsigned long long)priv->aligned_ict_tbl_dma,
1707 (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
1708
1709 priv->ict_tbl = priv->ict_tbl_vir +
1710 (priv->aligned_ict_tbl_dma - priv->ict_tbl_dma);
1711
1712 IWL_DEBUG_ISR(priv, "ict vir addr %p vir aligned %p diff %d\n",
1713 priv->ict_tbl, priv->ict_tbl_vir,
1714 (int)(priv->aligned_ict_tbl_dma - priv->ict_tbl_dma));
1715
1716 /* reset table and index to all 0 */
1717 memset(priv->ict_tbl_vir,0, (sizeof(u32) * ICT_COUNT) + PAGE_SIZE);
1718 priv->ict_index = 0;
1719
1720 /* add periodic RX interrupt */
1721 priv->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
1722 return 0;
1723}
1724EXPORT_SYMBOL(iwl_alloc_isr_ict);
1725
1726/* Device is going up inform it about using ICT interrupt table,
1727 * also we need to tell the driver to start using ICT interrupt.
1728 */
1729int iwl_reset_ict(struct iwl_priv *priv)
1730{
1731 u32 val;
1732 unsigned long flags;
1733
1734 if (!priv->ict_tbl_vir)
1735 return 0;
1736
1737 spin_lock_irqsave(&priv->lock, flags);
1738 iwl_disable_interrupts(priv);
1739
1740 memset(&priv->ict_tbl[0], 0, sizeof(u32) * ICT_COUNT);
1741
1742 val = priv->aligned_ict_tbl_dma >> PAGE_SHIFT;
1743
1744 val |= CSR_DRAM_INT_TBL_ENABLE;
1745 val |= CSR_DRAM_INIT_TBL_WRAP_CHECK;
1746
1747 IWL_DEBUG_ISR(priv, "CSR_DRAM_INT_TBL_REG =0x%X "
1748 "aligned dma address %Lx\n",
1749 val, (unsigned long long)priv->aligned_ict_tbl_dma);
1750
1751 iwl_write32(priv, CSR_DRAM_INT_TBL_REG, val);
1752 priv->use_ict = true;
1753 priv->ict_index = 0;
1754 iwl_write32(priv, CSR_INT, priv->inta_mask);
1755 iwl_enable_interrupts(priv);
1756 spin_unlock_irqrestore(&priv->lock, flags);
1757
1758 return 0;
1759}
1760EXPORT_SYMBOL(iwl_reset_ict);
1761
1762/* Device is going down disable ict interrupt usage */
1763void iwl_disable_ict(struct iwl_priv *priv)
1764{
1765 unsigned long flags;
1766
1767 spin_lock_irqsave(&priv->lock, flags);
1768 priv->use_ict = false;
1769 spin_unlock_irqrestore(&priv->lock, flags);
1770}
1771EXPORT_SYMBOL(iwl_disable_ict);
1772
1773/* interrupt handler using ict table, with this interrupt driver will
1774 * stop using INTA register to get device's interrupt, reading this register
1775 * is expensive, device will write interrupts in ICT dram table, increment
1776 * index then will fire interrupt to driver, driver will OR all ICT table
1777 * entries from current index up to table entry with 0 value. the result is
1778 * the interrupt we need to service, driver will set the entries back to 0 and
1779 * set index.
1780 */
1781irqreturn_t iwl_isr_ict(int irq, void *data)
1782{
1783 struct iwl_priv *priv = data;
1784 u32 inta, inta_mask;
1785 u32 val = 0;
1786
1787 if (!priv)
1788 return IRQ_NONE;
1789
1790 /* dram interrupt table not set yet,
1791 * use legacy interrupt.
1792 */
1793 if (!priv->use_ict)
1794 return iwl_isr(irq, data);
1795
1796 spin_lock(&priv->lock);
1797
1798 /* Disable (but don't clear!) interrupts here to avoid
1799 * back-to-back ISRs and sporadic interrupts from our NIC.
1800 * If we have something to service, the tasklet will re-enable ints.
1801 * If we *don't* have something, we'll re-enable before leaving here.
1802 */
1803 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
1804 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1805
1806
1807 /* Ignore interrupt if there's nothing in NIC to service.
1808 * This may be due to IRQ shared with another device,
1809 * or due to sporadic interrupts thrown from our NIC. */
1810 if (!priv->ict_tbl[priv->ict_index]) {
1811 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
1812 goto none;
1813 }
1814
1815 /* read all entries that not 0 start with ict_index */
1816 while (priv->ict_tbl[priv->ict_index]) {
1817
1818 val |= le32_to_cpu(priv->ict_tbl[priv->ict_index]);
1819 IWL_DEBUG_ISR(priv, "ICT index %d value 0x%08X\n",
1820 priv->ict_index,
1821 le32_to_cpu(priv->ict_tbl[priv->ict_index]));
1822 priv->ict_tbl[priv->ict_index] = 0;
1823 priv->ict_index = iwl_queue_inc_wrap(priv->ict_index,
1824 ICT_COUNT);
1825
1826 }
1827
1828 /* We should not get this value, just ignore it. */
1829 if (val == 0xffffffff)
1830 val = 0;
1831
1832 /*
1833 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1834 * (bit 15 before shifting it to 31) to clear when using interrupt
1835 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1836 * so we use them to decide on the real state of the Rx bit.
 1837 * In other words, bit 15 is set if bit 18 or bit 19 are set.
1838 */
1839 if (val & 0xC0000)
1840 val |= 0x8000;
1841
1842 inta = (0xff & val) | ((0xff00 & val) << 16);
1843 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
1844 inta, inta_mask, val);
1845
1846 inta &= priv->inta_mask;
1847 priv->inta |= inta;
1848
1849 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1850 if (likely(inta))
1851 tasklet_schedule(&priv->irq_tasklet);
1852 else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta) {
 1853 /* Allow interrupts if they were disabled by this handler and
 1854 * no tasklet was scheduled. We should not enable interrupts here;
 1855 * the tasklet will enable them.
1856 */
1857 iwl_enable_interrupts(priv);
1858 }
1859
1860 spin_unlock(&priv->lock);
1861 return IRQ_HANDLED;
1862
1863 none:
1864 /* re-enable interrupts here since we don't have anything to service.
1865 * only Re-enable if disabled by irq.
1866 */
1867 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
1868 iwl_enable_interrupts(priv);
1869
1870 spin_unlock(&priv->lock);
1871 return IRQ_NONE;
1872}
1873EXPORT_SYMBOL(iwl_isr_ict);
1874
1875
1876static irqreturn_t iwl_isr(int irq, void *data)
1877{
1878 struct iwl_priv *priv = data;
1879 u32 inta, inta_mask;
1880#ifdef CONFIG_IWLWIFI_DEBUG
1881 u32 inta_fh;
1882#endif
1883 if (!priv)
1884 return IRQ_NONE;
1885
1886 spin_lock(&priv->lock);
1887
1888 /* Disable (but don't clear!) interrupts here to avoid
1889 * back-to-back ISRs and sporadic interrupts from our NIC.
1890 * If we have something to service, the tasklet will re-enable ints.
1891 * If we *don't* have something, we'll re-enable before leaving here. */
1892 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
1893 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1894
1895 /* Discover which interrupts are active/pending */
1896 inta = iwl_read32(priv, CSR_INT);
1897
1898 /* Ignore interrupt if there's nothing in NIC to service.
1899 * This may be due to IRQ shared with another device,
1900 * or due to sporadic interrupts thrown from our NIC. */
1901 if (!inta) {
1902 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0\n");
1903 goto none;
1904 }
1905
1906 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1907 /* Hardware disappeared. It might have already raised
1908 * an interrupt */
1909 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1910 goto unplugged;
1911 }
1912
1913#ifdef CONFIG_IWLWIFI_DEBUG
1914 if (iwl_get_debug_level(priv) & (IWL_DL_ISR)) {
1915 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1916 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, "
1917 "fh 0x%08x\n", inta, inta_mask, inta_fh);
1918 }
1919#endif
1920
1921 priv->inta |= inta;
1922 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1923 if (likely(inta))
1924 tasklet_schedule(&priv->irq_tasklet);
1925 else if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
1926 iwl_enable_interrupts(priv);
1927
1928 unplugged:
1929 spin_unlock(&priv->lock);
1930 return IRQ_HANDLED;
1931
1932 none:
1933 /* re-enable interrupts here since we don't have anything to service. */
 1934 /* only re-enable if disabled by irq and no tasklet was scheduled. */
1935 if (test_bit(STATUS_INT_ENABLED, &priv->status) && !priv->inta)
1936 iwl_enable_interrupts(priv);
1937
1938 spin_unlock(&priv->lock);
1939 return IRQ_NONE;
1940}
1941
1942irqreturn_t iwl_isr_legacy(int irq, void *data) 1423irqreturn_t iwl_isr_legacy(int irq, void *data)
1943{ 1424{
1944 struct iwl_priv *priv = data; 1425 struct iwl_priv *priv = data;
1945 u32 inta, inta_mask; 1426 u32 inta, inta_mask;
1946 u32 inta_fh; 1427 u32 inta_fh;
1428 unsigned long flags;
1947 if (!priv) 1429 if (!priv)
1948 return IRQ_NONE; 1430 return IRQ_NONE;
1949 1431
1950 spin_lock(&priv->lock); 1432 spin_lock_irqsave(&priv->lock, flags);
1951 1433
1952 /* Disable (but don't clear!) interrupts here to avoid 1434 /* Disable (but don't clear!) interrupts here to avoid
1953 * back-to-back ISRs and sporadic interrupts from our NIC. 1435 * back-to-back ISRs and sporadic interrupts from our NIC.
@@ -1985,7 +1467,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
1985 tasklet_schedule(&priv->irq_tasklet); 1467 tasklet_schedule(&priv->irq_tasklet);
1986 1468
1987 unplugged: 1469 unplugged:
1988 spin_unlock(&priv->lock); 1470 spin_unlock_irqrestore(&priv->lock, flags);
1989 return IRQ_HANDLED; 1471 return IRQ_HANDLED;
1990 1472
1991 none: 1473 none:
@@ -1993,7 +1475,7 @@ irqreturn_t iwl_isr_legacy(int irq, void *data)
1993 /* only re-enable if disabled by irq */ 1475 /* only re-enable if disabled by irq */
1994 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1476 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1995 iwl_enable_interrupts(priv); 1477 iwl_enable_interrupts(priv);
1996 spin_unlock(&priv->lock); 1478 spin_unlock_irqrestore(&priv->lock, flags);
1997 return IRQ_NONE; 1479 return IRQ_NONE;
1998} 1480}
1999EXPORT_SYMBOL(iwl_isr_legacy); 1481EXPORT_SYMBOL(iwl_isr_legacy);
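
The switch from spin_lock() to spin_lock_irqsave() in the legacy handler saves and restores the caller's interrupt state instead of assuming it, which is the safe form when the same lock is also taken from process context. Reduced to a sketch (demo_lock and demo_count are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_count;

static void demo_touch(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);      /* disables local IRQs, saves state */
        demo_count++;
        spin_unlock_irqrestore(&demo_lock, flags); /* restores the saved state */
}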
@@ -2306,12 +1788,6 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
2306 cpu_to_le16((params->txop * 32)); 1788 cpu_to_le16((params->txop * 32));
2307 1789
2308 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0; 1790 priv->qos_data.def_qos_parm.ac[q].reserved1 = 0;
2309 priv->qos_data.qos_active = 1;
2310
2311 if (priv->iw_mode == NL80211_IFTYPE_AP)
2312 iwl_activate_qos(priv, 1);
2313 else if (priv->assoc_id && iwl_is_associated(priv))
2314 iwl_activate_qos(priv, 0);
2315 1791
2316 spin_unlock_irqrestore(&priv->lock, flags); 1792 spin_unlock_irqrestore(&priv->lock, flags);
2317 1793
@@ -2326,7 +1802,7 @@ static void iwl_ht_conf(struct iwl_priv *priv,
2326 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 1802 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2327 struct ieee80211_sta *sta; 1803 struct ieee80211_sta *sta;
2328 1804
2329 IWL_DEBUG_MAC80211(priv, "enter: \n"); 1805 IWL_DEBUG_MAC80211(priv, "enter:\n");
2330 1806
2331 if (!ht_conf->is_ht) 1807 if (!ht_conf->is_ht)
2332 return; 1808 return;
@@ -2568,11 +2044,6 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
2568 return -EIO; 2044 return -EIO;
2569 } 2045 }
2570 2046
2571 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
2572 IWL_DEBUG_MAC80211(priv, "leave - not IBSS\n");
2573 return -EIO;
2574 }
2575
2576 spin_lock_irqsave(&priv->lock, flags); 2047 spin_lock_irqsave(&priv->lock, flags);
2577 2048
2578 if (priv->ibss_beacon) 2049 if (priv->ibss_beacon)
@@ -2587,52 +2058,25 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb)
2587 IWL_DEBUG_MAC80211(priv, "leave\n"); 2058 IWL_DEBUG_MAC80211(priv, "leave\n");
2588 spin_unlock_irqrestore(&priv->lock, flags); 2059 spin_unlock_irqrestore(&priv->lock, flags);
2589 2060
2590 iwl_reset_qos(priv);
2591
2592 priv->cfg->ops->lib->post_associate(priv); 2061 priv->cfg->ops->lib->post_associate(priv);
2593 2062
2594
2595 return 0; 2063 return 0;
2596} 2064}
2597EXPORT_SYMBOL(iwl_mac_beacon_update); 2065EXPORT_SYMBOL(iwl_mac_beacon_update);
2598 2066
2599int iwl_set_mode(struct iwl_priv *priv, int mode) 2067static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
2600{ 2068{
2601 if (mode == NL80211_IFTYPE_ADHOC) { 2069 iwl_connection_init_rx_config(priv, vif->type);
2602 const struct iwl_channel_info *ch_info;
2603
2604 ch_info = iwl_get_channel_info(priv,
2605 priv->band,
2606 le16_to_cpu(priv->staging_rxon.channel));
2607
2608 if (!ch_info || !is_channel_ibss(ch_info)) {
2609 IWL_ERR(priv, "channel %d not IBSS channel\n",
2610 le16_to_cpu(priv->staging_rxon.channel));
2611 return -EINVAL;
2612 }
2613 }
2614
2615 iwl_connection_init_rx_config(priv, mode);
2616 2070
2617 if (priv->cfg->ops->hcmd->set_rxon_chain) 2071 if (priv->cfg->ops->hcmd->set_rxon_chain)
2618 priv->cfg->ops->hcmd->set_rxon_chain(priv); 2072 priv->cfg->ops->hcmd->set_rxon_chain(priv);
2619 2073
2620 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN); 2074 memcpy(priv->staging_rxon.node_addr, priv->mac_addr, ETH_ALEN);
2621 2075
2622 iwl_clear_stations_table(priv); 2076 return iwlcore_commit_rxon(priv);
2623
2624 /* don't commit rxon if rf-kill is on */
2625 if (!iwl_is_ready_rf(priv))
2626 return -EAGAIN;
2627
2628 iwlcore_commit_rxon(priv);
2629
2630 return 0;
2631} 2077}
2632EXPORT_SYMBOL(iwl_set_mode);
2633 2078
2634int iwl_mac_add_interface(struct ieee80211_hw *hw, 2079int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
2635 struct ieee80211_vif *vif)
2636{ 2080{
2637 struct iwl_priv *priv = hw->priv; 2081 struct iwl_priv *priv = hw->priv;
2638 int err = 0; 2082 int err = 0;
@@ -2641,6 +2085,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
2641 2085
2642 mutex_lock(&priv->mutex); 2086 mutex_lock(&priv->mutex);
2643 2087
2088 if (WARN_ON(!iwl_is_ready_rf(priv))) {
2089 err = -EINVAL;
2090 goto out;
2091 }
2092
2644 if (priv->vif) { 2093 if (priv->vif) {
2645 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n"); 2094 IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
2646 err = -EOPNOTSUPP; 2095 err = -EOPNOTSUPP;
@@ -2650,15 +2099,21 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
2650 priv->vif = vif; 2099 priv->vif = vif;
2651 priv->iw_mode = vif->type; 2100 priv->iw_mode = vif->type;
2652 2101
2653 if (vif->addr) { 2102 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
2654 IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr); 2103 memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
2655 memcpy(priv->mac_addr, vif->addr, ETH_ALEN); 2104
2656 } 2105 err = iwl_set_mode(priv, vif);
2106 if (err)
2107 goto out_err;
2108
2109 /* Add the broadcast address so we can send broadcast frames */
2110 priv->cfg->ops->lib->add_bcast_station(priv);
2657 2111
2658 if (iwl_set_mode(priv, vif->type) == -EAGAIN) 2112 goto out;
2659 /* we are not ready, will run again when ready */
2660 set_bit(STATUS_MODE_PENDING, &priv->status);
2661 2113
2114 out_err:
2115 priv->vif = NULL;
2116 priv->iw_mode = NL80211_IFTYPE_STATION;
2662 out: 2117 out:
2663 mutex_unlock(&priv->mutex); 2118 mutex_unlock(&priv->mutex);
2664 2119
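
The rewritten error path above is the usual goto-unwind shape: take the lock once, jump to out_err to undo partial state, and fall through a single unlock. A self-contained sketch of that shape, with hypothetical demo_* names:

#include <linux/errno.h>
#include <linux/mutex.h>

struct demo_dev {
        struct mutex mutex;
        void *vif;
};

static int demo_commit(struct demo_dev *d) { return 0; } /* stand-in */

static int demo_add_interface(struct demo_dev *d, void *vif)
{
        int err = 0;

        mutex_lock(&d->mutex);
        if (d->vif) {
                err = -EOPNOTSUPP;      /* only one interface at a time */
                goto out;
        }
        d->vif = vif;
        err = demo_commit(d);
        if (err)
                goto out_err;
        goto out;                       /* success: skip the unwind */

out_err:
        d->vif = NULL;                  /* undo exactly what was done */
out:
        mutex_unlock(&d->mutex);
        return err;
}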
@@ -2668,7 +2123,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw,
2668EXPORT_SYMBOL(iwl_mac_add_interface); 2123EXPORT_SYMBOL(iwl_mac_add_interface);
2669 2124
2670void iwl_mac_remove_interface(struct ieee80211_hw *hw, 2125void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2671 struct ieee80211_vif *vif) 2126 struct ieee80211_vif *vif)
2672{ 2127{
2673 struct iwl_priv *priv = hw->priv; 2128 struct iwl_priv *priv = hw->priv;
2674 2129
@@ -2676,6 +2131,8 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
2676 2131
2677 mutex_lock(&priv->mutex); 2132 mutex_lock(&priv->mutex);
2678 2133
2134 iwl_clear_ucode_stations(priv, true);
2135
2679 if (iwl_is_ready_rf(priv)) { 2136 if (iwl_is_ready_rf(priv)) {
2680 iwl_scan_cancel_timeout(priv, 100); 2137 iwl_scan_cancel_timeout(priv, 100);
2681 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2138 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
@@ -2752,15 +2209,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2752 goto set_ch_out; 2209 goto set_ch_out;
2753 } 2210 }
2754 2211
2755 if (priv->iw_mode == NL80211_IFTYPE_ADHOC &&
2756 !is_channel_ibss(ch_info)) {
2757 IWL_ERR(priv, "channel %d in band %d not "
2758 "IBSS channel\n",
2759 conf->channel->hw_value, conf->channel->band);
2760 ret = -EINVAL;
2761 goto set_ch_out;
2762 }
2763
2764 spin_lock_irqsave(&priv->lock, flags); 2212 spin_lock_irqsave(&priv->lock, flags);
2765 2213
2766 /* Configure HT40 channels */ 2214 /* Configure HT40 channels */
@@ -2833,6 +2281,15 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2833 iwl_set_tx_power(priv, conf->power_level, false); 2281 iwl_set_tx_power(priv, conf->power_level, false);
2834 } 2282 }
2835 2283
2284 if (changed & IEEE80211_CONF_CHANGE_QOS) {
2285 bool qos_active = !!(conf->flags & IEEE80211_CONF_QOS);
2286
2287 spin_lock_irqsave(&priv->lock, flags);
2288 priv->qos_data.qos_active = qos_active;
2289 iwl_update_qos(priv);
2290 spin_unlock_irqrestore(&priv->lock, flags);
2291 }
2292
2836 if (!iwl_is_ready(priv)) { 2293 if (!iwl_is_ready(priv)) {
2837 IWL_DEBUG_MAC80211(priv, "leave - not ready\n"); 2294 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2838 goto out; 2295 goto out;
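
The new IEEE80211_CONF_CHANGE_QOS block mirrors a mac80211 flag into driver state under the lock before pushing it to the firmware. The changed-bitmask dispatch itself reduces to this userspace sketch (flag values and handlers are illustrative, not mac80211's):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_CHANGE_CHANNEL (1u << 0)
#define DEMO_CHANGE_POWER   (1u << 1)
#define DEMO_CHANGE_QOS     (1u << 2)

static void demo_config(unsigned int changed, bool qos_flag)
{
        if (changed & DEMO_CHANGE_CHANNEL)
                printf("retune channel\n");
        if (changed & DEMO_CHANGE_POWER)
                printf("update tx power\n");
        if (changed & DEMO_CHANGE_QOS)
                printf("qos_active = %d\n", qos_flag);
}

int main(void)
{
        demo_config(DEMO_CHANGE_QOS | DEMO_CHANGE_POWER, true);
        return 0;
}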
@@ -2867,12 +2324,9 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2867 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config)); 2324 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2868 spin_unlock_irqrestore(&priv->lock, flags); 2325 spin_unlock_irqrestore(&priv->lock, flags);
2869 2326
2870 iwl_reset_qos(priv);
2871
2872 spin_lock_irqsave(&priv->lock, flags); 2327 spin_lock_irqsave(&priv->lock, flags);
2873 priv->assoc_id = 0; 2328 priv->assoc_id = 0;
2874 priv->assoc_capability = 0; 2329 priv->assoc_capability = 0;
2875 priv->assoc_station_added = 0;
2876 2330
2877 /* new association get rid of ibss beacon skb */ 2331 /* new association get rid of ibss beacon skb */
2878 if (priv->ibss_beacon) 2332 if (priv->ibss_beacon)
@@ -2882,8 +2336,6 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2882 2336
2883 priv->beacon_int = priv->vif->bss_conf.beacon_int; 2337 priv->beacon_int = priv->vif->bss_conf.beacon_int;
2884 priv->timestamp = 0; 2338 priv->timestamp = 0;
2885 if ((priv->iw_mode == NL80211_IFTYPE_STATION))
2886 priv->beacon_int = 0;
2887 2339
2888 spin_unlock_irqrestore(&priv->lock, flags); 2340 spin_unlock_irqrestore(&priv->lock, flags);
2889 2341
@@ -2896,17 +2348,9 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2896 /* we are restarting association process 2348 /* we are restarting association process
2897 * clear RXON_FILTER_ASSOC_MSK bit 2349 * clear RXON_FILTER_ASSOC_MSK bit
2898 */ 2350 */
2899 if (priv->iw_mode != NL80211_IFTYPE_AP) { 2351 iwl_scan_cancel_timeout(priv, 100);
2900 iwl_scan_cancel_timeout(priv, 100); 2352 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2901 priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK; 2353 iwlcore_commit_rxon(priv);
2902 iwlcore_commit_rxon(priv);
2903 }
2904
2905 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
2906 IWL_DEBUG_MAC80211(priv, "leave - not in IBSS\n");
2907 mutex_unlock(&priv->mutex);
2908 return;
2909 }
2910 2354
2911 iwl_set_rate(priv); 2355 iwl_set_rate(priv);
2912 2356
@@ -2923,7 +2367,7 @@ int iwl_alloc_txq_mem(struct iwl_priv *priv)
2923 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues, 2367 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
2924 GFP_KERNEL); 2368 GFP_KERNEL);
2925 if (!priv->txq) { 2369 if (!priv->txq) {
2926 IWL_ERR(priv, "Not enough memory for txq \n"); 2370 IWL_ERR(priv, "Not enough memory for txq\n");
2927 return -ENOMEM; 2371 return -ENOMEM;
2928 } 2372 }
2929 return 0; 2373 return 0;
@@ -3403,6 +2847,99 @@ int iwl_force_reset(struct iwl_priv *priv, int mode)
3403 } 2847 }
3404 return 0; 2848 return 0;
3405} 2849}
2850EXPORT_SYMBOL(iwl_force_reset);
2851
2852/**
2853 * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover
2854 *
2855 * Under normal conditions (no queue is stuck), the timer is re-armed to
2856 * fire every monitor_recover_period milliseconds after the last timer
2857 * expired. When a queue's read_ptr stays at the same place, the timer is
2858 * shortened to 100 ms. This is
2859 * 1) to reduce the chance that the read_ptr wraps around (not stuck)
2860 * 2) to detect stuck queues more quickly, before the station and AP
2861 * disassociate from each other.
2862 *
2863 * This function monitors all the tx queues and recovers if any of
2864 * them is stuck.
2865 * 1. It first checks the cmd queue for a stuck condition. If it is stuck,
2866 * it recovers by resetting the firmware and returns.
2867 * 2. Then, it checks for station association. If associated, it checks
2868 * the other queues. If any queue is stuck, it recovers by resetting
2869 * the firmware.
2870 * Note: the read_ptr must stay at the same place MAX_REPEAT+1 times
2871 * for a queue to be considered stuck.
2872 */
2873/*
2874 * The maximum number of times the read pointer of a tx queue may stay
2875 * at the same place before the queue is considered stuck.
2876 */
2877#define MAX_REPEAT (2)
2878static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
2879{
2880 struct iwl_tx_queue *txq;
2881 struct iwl_queue *q;
2882
2883 txq = &priv->txq[cnt];
2884 q = &txq->q;
2885 /* nothing to check if the queue is empty */
2886 if (q->read_ptr != q->write_ptr) {
2887 if (q->read_ptr == q->last_read_ptr) {
2888 /* the queue has not been read since the last check */
2889 if (q->repeat_same_read_ptr > MAX_REPEAT) {
2890 IWL_ERR(priv,
2891 "queue %d stuck %d time. Fw reload.\n",
2892 q->id, q->repeat_same_read_ptr);
2893 q->repeat_same_read_ptr = 0;
2894 iwl_force_reset(priv, IWL_FW_RESET);
2895 } else {
2896 q->repeat_same_read_ptr++;
2897 IWL_DEBUG_RADIO(priv,
2898 "queue %d, not read %d time\n",
2899 q->id,
2900 q->repeat_same_read_ptr);
2901 mod_timer(&priv->monitor_recover, jiffies +
2902 msecs_to_jiffies(IWL_ONE_HUNDRED_MSECS));
2903 }
2904 return 1;
2905 } else {
2906 q->last_read_ptr = q->read_ptr;
2907 q->repeat_same_read_ptr = 0;
2908 }
2909 }
2910 return 0;
2911}
2912
2913void iwl_bg_monitor_recover(unsigned long data)
2914{
2915 struct iwl_priv *priv = (struct iwl_priv *)data;
2916 int cnt;
2917
2918 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2919 return;
2920
2921 /* monitor and check for stuck cmd queue */
2922 if (iwl_check_stuck_queue(priv, IWL_CMD_QUEUE_NUM))
2923 return;
2924
2925 /* monitor and check for other stuck queues */
2926 if (iwl_is_associated(priv)) {
2927 for (cnt = 0; cnt < priv->hw_params.max_txq_num; cnt++) {
2928 /* skip as we already checked the command queue */
2929 if (cnt == IWL_CMD_QUEUE_NUM)
2930 continue;
2931 if (iwl_check_stuck_queue(priv, cnt))
2932 return;
2933 }
2934 }
2935 /*
2936 * Reschedule the timer to fire after
2937 * priv->cfg->monitor_recover_period milliseconds
2938 */
2939 mod_timer(&priv->monitor_recover,
2940 jiffies + msecs_to_jiffies(priv->cfg->monitor_recover_period));
2941}
2942EXPORT_SYMBOL(iwl_bg_monitor_recover);
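
The bookkeeping in iwl_check_stuck_queue() is worth seeing in isolation: a queue only counts as stuck when it is non-empty and its read pointer fails to move for MAX_REPEAT+1 consecutive checks. A userspace analogue of that logic (the struct is illustrative, not the driver's iwl_queue):

#include <stdbool.h>
#include <stdio.h>

#define MAX_REPEAT 2

struct demo_queue {
        unsigned int read_ptr, write_ptr, last_read_ptr;
        unsigned int repeat_same_read_ptr;
};

static bool demo_queue_stuck(struct demo_queue *q)
{
        if (q->read_ptr == q->write_ptr)
                return false;                   /* empty: nothing pending */
        if (q->read_ptr != q->last_read_ptr) {
                q->last_read_ptr = q->read_ptr; /* progress was made */
                q->repeat_same_read_ptr = 0;
                return false;
        }
        return ++q->repeat_same_read_ptr > MAX_REPEAT;
}

int main(void)
{
        struct demo_queue q = { .read_ptr = 1, .write_ptr = 5 };
        int i;

        /* read_ptr never moves, so the final check reports stuck=1 */
        for (i = 0; i < 4; i++)
                printf("check %d: stuck=%d\n", i, demo_queue_stuck(&q));
        return 0;
}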
3406 2943
3407#ifdef CONFIG_PM 2944#ifdef CONFIG_PM
3408 2945
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 36940a9ec6b9..6c3f0127f743 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -191,6 +191,14 @@ struct iwl_lib_ops {
191 struct iwl_temp_ops temp_ops; 191 struct iwl_temp_ops temp_ops;
192 /* station management */ 192 /* station management */
193 void (*add_bcast_station)(struct iwl_priv *priv); 193 void (*add_bcast_station)(struct iwl_priv *priv);
194 /* recover from tx queue stall */
195 void (*recover_from_tx_stall)(unsigned long data);
196 /* check for plcp health */
197 bool (*check_plcp_health)(struct iwl_priv *priv,
198 struct iwl_rx_packet *pkt);
199 /* check for ack health */
200 bool (*check_ack_health)(struct iwl_priv *priv,
201 struct iwl_rx_packet *pkt);
194}; 202};
195 203
196struct iwl_led_ops { 204struct iwl_led_ops {
@@ -295,6 +303,11 @@ struct iwl_cfg {
295 const bool support_wimax_coexist; 303 const bool support_wimax_coexist;
296 u8 plcp_delta_threshold; 304 u8 plcp_delta_threshold;
297 s32 chain_noise_scale; 305 s32 chain_noise_scale;
306 /* timer period for monitoring the driver queues */
307 u32 monitor_recover_period;
308 bool temperature_kelvin;
309 bool off_channel_workaround;
310 u32 max_event_log_size;
298}; 311};
299 312
300/*************************** 313/***************************
@@ -304,8 +317,7 @@ struct iwl_cfg {
304struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 317struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
305 struct ieee80211_ops *hw_ops); 318 struct ieee80211_ops *hw_ops);
306void iwl_hw_detect(struct iwl_priv *priv); 319void iwl_hw_detect(struct iwl_priv *priv);
307void iwl_reset_qos(struct iwl_priv *priv); 320void iwl_activate_qos(struct iwl_priv *priv);
308void iwl_activate_qos(struct iwl_priv *priv, u8 force);
309int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 321int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
310 const struct ieee80211_tx_queue_params *params); 322 const struct ieee80211_tx_queue_params *params);
311void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt); 323void iwl_set_rxon_hwcrypto(struct iwl_priv *priv, int hw_decrypt);
@@ -326,7 +338,6 @@ void iwl_irq_handle_error(struct iwl_priv *priv);
326void iwl_configure_filter(struct ieee80211_hw *hw, 338void iwl_configure_filter(struct ieee80211_hw *hw,
327 unsigned int changed_flags, 339 unsigned int changed_flags,
328 unsigned int *total_flags, u64 multicast); 340 unsigned int *total_flags, u64 multicast);
329int iwl_hw_nic_init(struct iwl_priv *priv);
330int iwl_set_hw_params(struct iwl_priv *priv); 341int iwl_set_hw_params(struct iwl_priv *priv);
331bool iwl_is_monitor_mode(struct iwl_priv *priv); 342bool iwl_is_monitor_mode(struct iwl_priv *priv);
332void iwl_post_associate(struct iwl_priv *priv); 343void iwl_post_associate(struct iwl_priv *priv);
@@ -336,7 +347,6 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
336 u32 changes); 347 u32 changes);
337int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb); 348int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
338int iwl_commit_rxon(struct iwl_priv *priv); 349int iwl_commit_rxon(struct iwl_priv *priv);
339int iwl_set_mode(struct iwl_priv *priv, int mode);
340int iwl_mac_add_interface(struct ieee80211_hw *hw, 350int iwl_mac_add_interface(struct ieee80211_hw *hw,
341 struct ieee80211_vif *vif); 351 struct ieee80211_vif *vif);
342void iwl_mac_remove_interface(struct ieee80211_hw *hw, 352void iwl_mac_remove_interface(struct ieee80211_hw *hw,
@@ -411,26 +421,22 @@ void iwl_rx_reply_error(struct iwl_priv *priv,
411/***************************************************** 421/*****************************************************
412* RX 422* RX
413******************************************************/ 423******************************************************/
414void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
415void iwl_cmd_queue_free(struct iwl_priv *priv); 424void iwl_cmd_queue_free(struct iwl_priv *priv);
416int iwl_rx_queue_alloc(struct iwl_priv *priv); 425int iwl_rx_queue_alloc(struct iwl_priv *priv);
417void iwl_rx_handle(struct iwl_priv *priv); 426void iwl_rx_handle(struct iwl_priv *priv);
418void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, 427void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
419 struct iwl_rx_queue *q); 428 struct iwl_rx_queue *q);
420void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
421void iwl_rx_replenish(struct iwl_priv *priv);
422void iwl_rx_replenish_now(struct iwl_priv *priv);
423int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
424void iwl_rx_queue_restock(struct iwl_priv *priv);
425int iwl_rx_queue_space(const struct iwl_rx_queue *q); 429int iwl_rx_queue_space(const struct iwl_rx_queue *q);
426void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
427void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb); 430void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
428int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
429/* Handlers */ 431/* Handlers */
430void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 432void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
431 struct iwl_rx_mem_buffer *rxb); 433 struct iwl_rx_mem_buffer *rxb);
432void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 434void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
433 struct iwl_rx_mem_buffer *rxb); 435 struct iwl_rx_mem_buffer *rxb);
436bool iwl_good_plcp_health(struct iwl_priv *priv,
437 struct iwl_rx_packet *pkt);
438bool iwl_good_ack_health(struct iwl_priv *priv,
439 struct iwl_rx_packet *pkt);
434void iwl_rx_statistics(struct iwl_priv *priv, 440void iwl_rx_statistics(struct iwl_priv *priv,
435 struct iwl_rx_mem_buffer *rxb); 441 struct iwl_rx_mem_buffer *rxb);
436void iwl_reply_statistics(struct iwl_priv *priv, 442void iwl_reply_statistics(struct iwl_priv *priv,
@@ -442,14 +448,10 @@ void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
442/***************************************************** 448/*****************************************************
443* TX 449* TX
444******************************************************/ 450******************************************************/
445int iwl_txq_ctx_alloc(struct iwl_priv *priv);
446void iwl_txq_ctx_reset(struct iwl_priv *priv);
447void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq); 451void iwl_hw_txq_free_tfd(struct iwl_priv *priv, struct iwl_tx_queue *txq);
448int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv, 452int iwl_hw_txq_attach_buf_to_tfd(struct iwl_priv *priv,
449 struct iwl_tx_queue *txq, 453 struct iwl_tx_queue *txq,
450 dma_addr_t addr, u16 len, u8 reset, u8 pad); 454 dma_addr_t addr, u16 len, u8 reset, u8 pad);
451int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
452void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
453int iwl_hw_tx_queue_init(struct iwl_priv *priv, 455int iwl_hw_tx_queue_init(struct iwl_priv *priv,
454 struct iwl_tx_queue *txq); 456 struct iwl_tx_queue *txq);
455void iwl_free_tfds_in_queue(struct iwl_priv *priv, 457void iwl_free_tfds_in_queue(struct iwl_priv *priv,
@@ -460,9 +462,6 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
460void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 462void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
461 int slots_num, u32 txq_id); 463 int slots_num, u32 txq_id);
462void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 464void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
463int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn);
464int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid);
465int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id);
466/***************************************************** 465/*****************************************************
467 * TX power 466 * TX power
468 ****************************************************/ 467 ****************************************************/
@@ -472,10 +471,7 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force);
472 * Rate 471 * Rate
473 ******************************************************************************/ 472 ******************************************************************************/
474 473
475void iwl_hwrate_to_tx_control(struct iwl_priv *priv, u32 rate_n_flags,
476 struct ieee80211_tx_info *info);
477int iwl_hwrate_to_plcp_idx(u32 rate_n_flags); 474int iwl_hwrate_to_plcp_idx(u32 rate_n_flags);
478int iwl_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band);
479 475
480u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv); 476u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv);
481 477
@@ -563,11 +559,6 @@ int iwl_send_card_state(struct iwl_priv *priv, u32 flags,
563 * PCI * 559 * PCI *
564 *****************************************************/ 560 *****************************************************/
565irqreturn_t iwl_isr_legacy(int irq, void *data); 561irqreturn_t iwl_isr_legacy(int irq, void *data);
566int iwl_reset_ict(struct iwl_priv *priv);
567void iwl_disable_ict(struct iwl_priv *priv);
568int iwl_alloc_isr_ict(struct iwl_priv *priv);
569void iwl_free_isr_ict(struct iwl_priv *priv);
570irqreturn_t iwl_isr_ict(int irq, void *data);
571 562
572static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv) 563static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
573{ 564{
@@ -577,6 +568,9 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
577 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl); 568 pci_read_config_word(priv->pci_dev, pos + PCI_EXP_LNKCTL, &pci_lnk_ctl);
578 return pci_lnk_ctl; 569 return pci_lnk_ctl;
579} 570}
571
572void iwl_bg_monitor_recover(unsigned long data);
573
580#ifdef CONFIG_PM 574#ifdef CONFIG_PM
581int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state); 575int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
582int iwl_pci_resume(struct pci_dev *pdev); 576int iwl_pci_resume(struct pci_dev *pdev);
@@ -625,7 +619,6 @@ void iwlcore_free_geos(struct iwl_priv *priv);
625#define STATUS_SCAN_HW 15 619#define STATUS_SCAN_HW 15
626#define STATUS_POWER_PMI 16 620#define STATUS_POWER_PMI 16
627#define STATUS_FW_ERROR 17 621#define STATUS_FW_ERROR 17
628#define STATUS_MODE_PENDING 18
629 622
630 623
631static inline int iwl_is_ready(struct iwl_priv *priv) 624static inline int iwl_is_ready(struct iwl_priv *priv)
@@ -677,15 +670,8 @@ extern int iwl_send_statistics_request(struct iwl_priv *priv,
677 u8 flags, bool clear); 670 u8 flags, bool clear);
678extern int iwl_verify_ucode(struct iwl_priv *priv); 671extern int iwl_verify_ucode(struct iwl_priv *priv);
679extern int iwl_send_lq_cmd(struct iwl_priv *priv, 672extern int iwl_send_lq_cmd(struct iwl_priv *priv,
680 struct iwl_link_quality_cmd *lq, u8 flags); 673 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
681extern void iwl_rx_reply_rx(struct iwl_priv *priv,
682 struct iwl_rx_mem_buffer *rxb);
683extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
684 struct iwl_rx_mem_buffer *rxb);
685void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
686 struct iwl_rx_mem_buffer *rxb);
687void iwl_apm_stop(struct iwl_priv *priv); 674void iwl_apm_stop(struct iwl_priv *priv);
688int iwl_apm_stop_master(struct iwl_priv *priv);
689int iwl_apm_init(struct iwl_priv *priv); 675int iwl_apm_init(struct iwl_priv *priv);
690 676
691void iwl_setup_rxon_timing(struct iwl_priv *priv); 677void iwl_setup_rxon_timing(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index b6e1b0ebe230..607a91f3eb6b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -561,8 +561,6 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
561 test_bit(STATUS_POWER_PMI, &priv->status)); 561 test_bit(STATUS_POWER_PMI, &priv->status));
562 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n", 562 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_FW_ERROR:\t %d\n",
563 test_bit(STATUS_FW_ERROR, &priv->status)); 563 test_bit(STATUS_FW_ERROR, &priv->status));
564 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_MODE_PENDING:\t %d\n",
565 test_bit(STATUS_MODE_PENDING, &priv->status));
566 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 564 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
567} 565}
568 566
@@ -661,7 +659,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
661 int pos = 0, i; 659 int pos = 0, i;
662 char buf[256]; 660 char buf[256];
663 const size_t bufsz = sizeof(buf); 661 const size_t bufsz = sizeof(buf);
664 ssize_t ret;
665 662
666 for (i = 0; i < AC_NUM; i++) { 663 for (i = 0; i < AC_NUM; i++) {
667 pos += scnprintf(buf + pos, bufsz - pos, 664 pos += scnprintf(buf + pos, bufsz - pos,
@@ -673,8 +670,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
673 priv->qos_data.def_qos_parm.ac[i].aifsn, 670 priv->qos_data.def_qos_parm.ac[i].aifsn,
674 priv->qos_data.def_qos_parm.ac[i].edca_txop); 671 priv->qos_data.def_qos_parm.ac[i].edca_txop);
675 } 672 }
676 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 673 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
677 return ret;
678} 674}
679 675
680static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf, 676static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
@@ -684,7 +680,6 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
684 int pos = 0; 680 int pos = 0;
685 char buf[256]; 681 char buf[256];
686 const size_t bufsz = sizeof(buf); 682 const size_t bufsz = sizeof(buf);
687 ssize_t ret;
688 683
689 pos += scnprintf(buf + pos, bufsz - pos, 684 pos += scnprintf(buf + pos, bufsz - pos,
690 "allow blinking: %s\n", 685 "allow blinking: %s\n",
@@ -698,8 +693,7 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
698 priv->last_blink_time); 693 priv->last_blink_time);
699 } 694 }
700 695
701 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 696 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
702 return ret;
703} 697}
704 698
705static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, 699static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
@@ -712,7 +706,6 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
712 char buf[100]; 706 char buf[100];
713 int pos = 0; 707 int pos = 0;
714 const size_t bufsz = sizeof(buf); 708 const size_t bufsz = sizeof(buf);
715 ssize_t ret;
716 709
717 pos += scnprintf(buf + pos, bufsz - pos, 710 pos += scnprintf(buf + pos, bufsz - pos,
718 "Thermal Throttling Mode: %s\n", 711 "Thermal Throttling Mode: %s\n",
@@ -732,8 +725,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
732 "HT mode: %d\n", 725 "HT mode: %d\n",
733 restriction->is_ht); 726 restriction->is_ht);
734 } 727 }
735 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 728 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
736 return ret;
737} 729}
738 730
739static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file, 731static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
@@ -770,13 +762,11 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
770 char buf[100]; 762 char buf[100];
771 int pos = 0; 763 int pos = 0;
772 const size_t bufsz = sizeof(buf); 764 const size_t bufsz = sizeof(buf);
773 ssize_t ret;
774 765
775 pos += scnprintf(buf + pos, bufsz - pos, 766 pos += scnprintf(buf + pos, bufsz - pos,
776 "11n 40MHz Mode: %s\n", 767 "11n 40MHz Mode: %s\n",
777 priv->disable_ht40 ? "Disabled" : "Enabled"); 768 priv->disable_ht40 ? "Disabled" : "Enabled");
778 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 769 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
779 return ret;
780} 770}
781 771
782static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file, 772static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
@@ -2052,7 +2042,6 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
2052 int pos = 0; 2042 int pos = 0;
2053 char buf[128]; 2043 char buf[128];
2054 const size_t bufsz = sizeof(buf); 2044 const size_t bufsz = sizeof(buf);
2055 ssize_t ret;
2056 2045
2057 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n", 2046 pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
2058 priv->event_log.ucode_trace ? "On" : "Off"); 2047 priv->event_log.ucode_trace ? "On" : "Off");
@@ -2063,8 +2052,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
2063 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n", 2052 pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
2064 priv->event_log.wraps_more_count); 2053 priv->event_log.wraps_more_count);
2065 2054
2066 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2055 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2067 return ret;
2068} 2056}
2069 2057
2070static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file, 2058static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
@@ -2096,6 +2084,31 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
2096 return count; 2084 return count;
2097} 2085}
2098 2086
2087static ssize_t iwl_dbgfs_rxon_flags_read(struct file *file,
2088 char __user *user_buf,
2089 size_t count, loff_t *ppos) {
2090
2091 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
2092 int len = 0;
2093 char buf[20];
2094
2095 len = sprintf(buf, "0x%04X\n", le32_to_cpu(priv->active_rxon.flags));
2096 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
2097}
2098
2099static ssize_t iwl_dbgfs_rxon_filter_flags_read(struct file *file,
2100 char __user *user_buf,
2101 size_t count, loff_t *ppos) {
2102
2103 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
2104 int len = 0;
2105 char buf[20];
2106
2107 len = sprintf(buf, "0x%04X\n",
2108 le32_to_cpu(priv->active_rxon.filter_flags));
2109 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
2110}
2111
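
The two added handlers show the minimal debugfs read shape: format into a small stack buffer and let simple_read_from_buffer() handle the user copy and *ppos bookkeeping. A standalone sketch of the same shape, using the bounded scnprintf() the rest of this file prefers over sprintf() (names are hypothetical):

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>

static u32 example_flags;       /* hypothetical state being exposed */

static ssize_t example_flags_read(struct file *file, char __user *user_buf,
                                  size_t count, loff_t *ppos)
{
        char buf[20];
        int len = scnprintf(buf, sizeof(buf), "0x%04X\n", example_flags);

        /* copies at most 'count' bytes and advances *ppos */
        return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}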
2099static ssize_t iwl_dbgfs_fh_reg_read(struct file *file, 2112static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2100 char __user *user_buf, 2113 char __user *user_buf,
2101 size_t count, loff_t *ppos) 2114 size_t count, loff_t *ppos)
@@ -2125,13 +2138,11 @@ static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
2125 int pos = 0; 2138 int pos = 0;
2126 char buf[12]; 2139 char buf[12];
2127 const size_t bufsz = sizeof(buf); 2140 const size_t bufsz = sizeof(buf);
2128 ssize_t ret;
2129 2141
2130 pos += scnprintf(buf + pos, bufsz - pos, "%d\n", 2142 pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
2131 priv->missed_beacon_threshold); 2143 priv->missed_beacon_threshold);
2132 2144
2133 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2145 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2134 return ret;
2135} 2146}
2136 2147
2137static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file, 2148static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
@@ -2160,27 +2171,6 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
2160 return count; 2171 return count;
2161} 2172}
2162 2173
2163static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
2164 const char __user *user_buf,
2165 size_t count, loff_t *ppos)
2166{
2167 struct iwl_priv *priv = file->private_data;
2168 char buf[8];
2169 int buf_size;
2170 int scan;
2171
2172 memset(buf, 0, sizeof(buf));
2173 buf_size = min(count, sizeof(buf) - 1);
2174 if (copy_from_user(buf, user_buf, buf_size))
2175 return -EFAULT;
2176 if (sscanf(buf, "%d", &scan) != 1)
2177 return -EINVAL;
2178
2179 iwl_internal_short_hw_scan(priv);
2180
2181 return count;
2182}
2183
2184static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file, 2174static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
2185 char __user *user_buf, 2175 char __user *user_buf,
2186 size_t count, loff_t *ppos) { 2176 size_t count, loff_t *ppos) {
@@ -2189,13 +2179,11 @@ static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
2189 int pos = 0; 2179 int pos = 0;
2190 char buf[12]; 2180 char buf[12];
2191 const size_t bufsz = sizeof(buf); 2181 const size_t bufsz = sizeof(buf);
2192 ssize_t ret;
2193 2182
2194 pos += scnprintf(buf + pos, bufsz - pos, "%u\n", 2183 pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
2195 priv->cfg->plcp_delta_threshold); 2184 priv->cfg->plcp_delta_threshold);
2196 2185
2197 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 2186 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2198 return ret;
2199} 2187}
2200 2188
2201static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file, 2189static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
@@ -2296,9 +2284,10 @@ DEBUGFS_WRITE_FILE_OPS(csr);
2296DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing); 2284DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
2297DEBUGFS_READ_FILE_OPS(fh_reg); 2285DEBUGFS_READ_FILE_OPS(fh_reg);
2298DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon); 2286DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
2299DEBUGFS_WRITE_FILE_OPS(internal_scan);
2300DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta); 2287DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
2301DEBUGFS_READ_WRITE_FILE_OPS(force_reset); 2288DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
2289DEBUGFS_READ_FILE_OPS(rxon_flags);
2290DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
2302 2291
2303/* 2292/*
2304 * Create the debugfs files and directories 2293 * Create the debugfs files and directories
@@ -2350,7 +2339,6 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2350 DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR); 2339 DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
2351 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR); 2340 DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
2352 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR); 2341 DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
2353 DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
2354 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR); 2342 DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
2355 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR); 2343 DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
2356 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 2344 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
@@ -2361,6 +2349,8 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
2361 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR); 2349 DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
2362 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR); 2350 DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
2363 } 2351 }
2352 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
2353 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
2364 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal); 2354 DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
2365 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf, 2355 DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
2366 &priv->disable_chain_noise_cal); 2356 &priv->disable_chain_noise_cal);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index ef1720a852e9..b2d94c7c6457 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -43,6 +43,7 @@
43#include "iwl-debug.h" 43#include "iwl-debug.h"
44#include "iwl-4965-hw.h" 44#include "iwl-4965-hw.h"
45#include "iwl-3945-hw.h" 45#include "iwl-3945-hw.h"
46#include "iwl-agn-hw.h"
46#include "iwl-led.h" 47#include "iwl-led.h"
47#include "iwl-power.h" 48#include "iwl-power.h"
48#include "iwl-agn-rs.h" 49#include "iwl-agn-rs.h"
@@ -57,6 +58,7 @@ extern struct iwl_cfg iwl5100_abg_cfg;
57extern struct iwl_cfg iwl5150_agn_cfg; 58extern struct iwl_cfg iwl5150_agn_cfg;
58extern struct iwl_cfg iwl5150_abg_cfg; 59extern struct iwl_cfg iwl5150_abg_cfg;
59extern struct iwl_cfg iwl6000i_2agn_cfg; 60extern struct iwl_cfg iwl6000i_2agn_cfg;
61extern struct iwl_cfg iwl6000i_g2_2agn_cfg;
60extern struct iwl_cfg iwl6000i_2abg_cfg; 62extern struct iwl_cfg iwl6000i_2abg_cfg;
61extern struct iwl_cfg iwl6000i_2bg_cfg; 63extern struct iwl_cfg iwl6000i_2bg_cfg;
62extern struct iwl_cfg iwl6000_3agn_cfg; 64extern struct iwl_cfg iwl6000_3agn_cfg;
@@ -67,45 +69,6 @@ extern struct iwl_cfg iwl1000_bg_cfg;
67 69
68struct iwl_tx_queue; 70struct iwl_tx_queue;
69 71
70/* shared structures from iwl-5000.c */
71extern struct iwl_mod_params iwl50_mod_params;
72extern struct iwl_ucode_ops iwl5000_ucode;
73extern struct iwl_lib_ops iwl5000_lib;
74extern struct iwl_hcmd_ops iwl5000_hcmd;
75extern struct iwl_hcmd_utils_ops iwl5000_hcmd_utils;
76
77/* shared functions from iwl-5000.c */
78extern u16 iwl5000_get_hcmd_size(u8 cmd_id, u16 len);
79extern u16 iwl5000_build_addsta_hcmd(const struct iwl_addsta_cmd *cmd,
80 u8 *data);
81extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
82 __le32 *tx_flags);
83extern int iwl5000_calc_rssi(struct iwl_priv *priv,
84 struct iwl_rx_phy_res *rx_resp);
85extern void iwl5000_nic_config(struct iwl_priv *priv);
86extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
87extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
88 size_t offset);
89extern void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
90 struct iwl_tx_queue *txq,
91 u16 byte_cnt);
92extern void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
93 struct iwl_tx_queue *txq);
94extern int iwl5000_load_ucode(struct iwl_priv *priv);
95extern void iwl5000_init_alive_start(struct iwl_priv *priv);
96extern int iwl5000_alive_notify(struct iwl_priv *priv);
97extern int iwl5000_hw_set_hw_params(struct iwl_priv *priv);
98extern int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
99 int tx_fifo, int sta_id, int tid, u16 ssn_idx);
100extern int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
101 u16 ssn_idx, u8 tx_fifo);
102extern void iwl5000_txq_set_sched(struct iwl_priv *priv, u32 mask);
103extern void iwl5000_setup_deferred_work(struct iwl_priv *priv);
104extern void iwl5000_rx_handler_setup(struct iwl_priv *priv);
105extern int iwl5000_hw_valid_rtc_data_addr(u32 addr);
106extern int iwl5000_send_tx_power(struct iwl_priv *priv);
107extern void iwl5000_temperature(struct iwl_priv *priv);
108
109/* CT-KILL constants */ 72/* CT-KILL constants */
110#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */ 73#define CT_KILL_THRESHOLD_LEGACY 110 /* in Celsius */
111#define CT_KILL_THRESHOLD 114 /* in Celsius */ 74#define CT_KILL_THRESHOLD 114 /* in Celsius */
@@ -183,6 +146,10 @@ struct iwl_queue {
183 int n_bd; /* number of BDs in this queue */ 146 int n_bd; /* number of BDs in this queue */
184 int write_ptr; /* 1-st empty entry (index) host_w*/ 147 int write_ptr; /* 1-st empty entry (index) host_w*/
185 int read_ptr; /* last used entry (index) host_r*/ 148 int read_ptr; /* last used entry (index) host_r*/
149 /* used for monitoring and recovering stuck queues */
150 int last_read_ptr; /* stores the previous read_ptr */
151 /* number of times read_ptr and last_read_ptr have been the same */
152 u8 repeat_same_read_ptr;
186 dma_addr_t dma_addr; /* physical addr for BD's */ 153 dma_addr_t dma_addr; /* physical addr for BD's */
187 int n_window; /* safe queue window */ 154 int n_window; /* safe queue window */
188 u32 id; 155 u32 id;
@@ -304,13 +271,11 @@ struct iwl_channel_info {
304 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES]; 271 struct iwl3945_scan_power_info scan_pwr_info[IWL_NUM_SCAN_RATES];
305}; 272};
306 273
307#define IWL_TX_FIFO_AC0 0 274#define IWL_TX_FIFO_BK 0
308#define IWL_TX_FIFO_AC1 1 275#define IWL_TX_FIFO_BE 1
309#define IWL_TX_FIFO_AC2 2 276#define IWL_TX_FIFO_VI 2
310#define IWL_TX_FIFO_AC3 3 277#define IWL_TX_FIFO_VO 3
311#define IWL_TX_FIFO_HCCA_1 5 278#define IWL_TX_FIFO_UNUSED -1
312#define IWL_TX_FIFO_HCCA_2 6
313#define IWL_TX_FIFO_NONE 7
314 279
315/* Minimum number of queues. MAX_NUM is defined in hw specific files. 280/* Minimum number of queues. MAX_NUM is defined in hw specific files.
316 * Set the minimum to accommodate the 4 standard TX queues, 1 command 281 * Set the minimum to accommodate the 4 standard TX queues, 1 command
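
With the renaming, the FIFO constants line up with the 802.11 access categories, and mac80211 hands the driver its queues in VO, VI, BE, BK order. A plausible queue-to-FIFO map built on the new names (the array is illustrative, not the driver's actual table):

static const s8 demo_queue_to_fifo[] = {
        IWL_TX_FIFO_VO,         /* mac80211 queue 0: voice */
        IWL_TX_FIFO_VI,         /* queue 1: video */
        IWL_TX_FIFO_BE,         /* queue 2: best effort */
        IWL_TX_FIFO_BK,         /* queue 3: background */
};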
@@ -361,13 +326,6 @@ enum {
361 326
362#define DEF_CMD_PAYLOAD_SIZE 320 327#define DEF_CMD_PAYLOAD_SIZE 320
363 328
364/*
365 * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
366 * SNAP header and alignment. It should also be big enough for 802.11
367 * control frames.
368 */
369#define IWL_LINK_HDR_MAX 64
370
371/** 329/**
372 * struct iwl_device_cmd 330 * struct iwl_device_cmd
373 * 331 *
@@ -519,38 +477,24 @@ struct iwl_ht_config {
519 u8 non_GF_STA_present; 477 u8 non_GF_STA_present;
520}; 478};
521 479
522union iwl_qos_capabity {
523 struct {
524 u8 edca_count:4; /* bit 0-3 */
525 u8 q_ack:1; /* bit 4 */
526 u8 queue_request:1; /* bit 5 */
527 u8 txop_request:1; /* bit 6 */
528 u8 reserved:1; /* bit 7 */
529 } q_AP;
530 struct {
531 u8 acvo_APSD:1; /* bit 0 */
532 u8 acvi_APSD:1; /* bit 1 */
533 u8 ac_bk_APSD:1; /* bit 2 */
534 u8 ac_be_APSD:1; /* bit 3 */
535 u8 q_ack:1; /* bit 4 */
536 u8 max_len:2; /* bit 5-6 */
537 u8 more_data_ack:1; /* bit 7 */
538 } q_STA;
539 u8 val;
540};
541
542/* QoS structures */ 480/* QoS structures */
543struct iwl_qos_info { 481struct iwl_qos_info {
544 int qos_active; 482 int qos_active;
545 union iwl_qos_capabity qos_cap;
546 struct iwl_qosparam_cmd def_qos_parm; 483 struct iwl_qosparam_cmd def_qos_parm;
547}; 484};
548 485
486/*
487 * The structure must be accessed with sta_lock held. Only while a station
488 * addition is in progress (IWL_STA_UCODE_INPROGRESS) may the commands
489 * (iwl_addsta_cmd and iwl_link_quality_cmd) be accessed without sta_lock
490 * held.
491 */
549struct iwl_station_entry { 492struct iwl_station_entry {
550 struct iwl_addsta_cmd sta; 493 struct iwl_addsta_cmd sta;
551 struct iwl_tid_data tid[MAX_TID_COUNT]; 494 struct iwl_tid_data tid[MAX_TID_COUNT];
552 u8 used; 495 u8 used;
553 struct iwl_hw_key keyinfo; 496 struct iwl_hw_key keyinfo;
497 struct iwl_link_quality_cmd *lq;
554}; 498};
555 499
556/* 500/*
@@ -1039,6 +983,11 @@ struct iwl_event_log {
1039#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) 983#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
1040#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) 984#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
1041 985
986/* timer constants used to monitor and recover stuck tx queues, in ms */
987#define IWL_MONITORING_PERIOD (1000)
988#define IWL_ONE_HUNDRED_MSECS (100)
989#define IWL_SIXTY_SECS (60000)
990
1042enum iwl_reset { 991enum iwl_reset {
1043 IWL_RF_RESET = 0, 992 IWL_RF_RESET = 0,
1044 IWL_FW_RESET, 993 IWL_FW_RESET,
@@ -1092,10 +1041,6 @@ struct iwl_priv {
1092 struct iwl_channel_info *channel_info; /* channel info array */ 1041 struct iwl_channel_info *channel_info; /* channel info array */
1093 u8 channel_count; /* # of channels */ 1042 u8 channel_count; /* # of channels */
1094 1043
1095 /* each calibration channel group in the EEPROM has a derived
1096 * clip setting for each rate. 3945 only.*/
1097 const struct iwl3945_clip_group clip39_groups[5];
1098
1099 /* thermal calibration */ 1044 /* thermal calibration */
1100 s32 temperature; /* degrees Kelvin */ 1045 s32 temperature; /* degrees Kelvin */
1101 s32 last_temperature; 1046 s32 last_temperature;
@@ -1168,16 +1113,13 @@ struct iwl_priv {
1168 u64 led_tpt; 1113 u64 led_tpt;
1169 1114
1170 u16 active_rate; 1115 u16 active_rate;
1171 u16 active_rate_basic;
1172 1116
1173 u8 assoc_station_added;
1174 u8 start_calib; 1117 u8 start_calib;
1175 struct iwl_sensitivity_data sensitivity_data; 1118 struct iwl_sensitivity_data sensitivity_data;
1176 struct iwl_chain_noise_data chain_noise_data; 1119 struct iwl_chain_noise_data chain_noise_data;
1177 __le16 sensitivity_tbl[HD_TABLE_SIZE]; 1120 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1178 1121
1179 struct iwl_ht_config current_ht_config; 1122 struct iwl_ht_config current_ht_config;
1180 u8 last_phy_res[100];
1181 1123
1182 /* Rate scaling data */ 1124 /* Rate scaling data */
1183 u8 retry_rate; 1125 u8 retry_rate;
@@ -1197,9 +1139,6 @@ struct iwl_priv {
1197 1139
1198 unsigned long status; 1140 unsigned long status;
1199 1141
1200 int last_rx_rssi; /* From Rx packet statistics */
1201 int last_rx_noise; /* From beacon statistics */
1202
1203 /* counts mgmt, ctl, and data packets */ 1142 /* counts mgmt, ctl, and data packets */
1204 struct traffic_stats tx_stats; 1143 struct traffic_stats tx_stats;
1205 struct traffic_stats rx_stats; 1144 struct traffic_stats rx_stats;
@@ -1218,8 +1157,6 @@ struct iwl_priv {
1218#endif 1157#endif
1219 1158
1220 /* context information */ 1159 /* context information */
1221 u16 rates_mask;
1222
1223 u8 bssid[ETH_ALEN]; 1160 u8 bssid[ETH_ALEN];
1224 u16 rts_threshold; 1161 u16 rts_threshold;
1225 u8 mac_addr[ETH_ALEN]; 1162 u8 mac_addr[ETH_ALEN];
@@ -1228,8 +1165,7 @@ struct iwl_priv {
1228 spinlock_t sta_lock; 1165 spinlock_t sta_lock;
1229 int num_stations; 1166 int num_stations;
1230 struct iwl_station_entry stations[IWL_STATION_COUNT]; 1167 struct iwl_station_entry stations[IWL_STATION_COUNT];
1231 struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; 1168 struct iwl_wep_key wep_keys[WEP_KEYS_MAX]; /* protected by mutex */
1232 u8 default_wep_key;
1233 u8 key_mapping_key; 1169 u8 key_mapping_key;
1234 unsigned long ucode_key_table; 1170 unsigned long ucode_key_table;
1235 1171
@@ -1244,10 +1180,6 @@ struct iwl_priv {
1244 1180
1245 u8 mac80211_registered; 1181 u8 mac80211_registered;
1246 1182
1247 /* Rx'd packet timing information */
1248 u32 last_beacon_time;
1249 u64 last_tsf;
1250
1251 /* eeprom -- this is in the card's little endian byte order */ 1183 /* eeprom -- this is in the card's little endian byte order */
1252 u8 *eeprom; 1184 u8 *eeprom;
1253 int nvm_device_type; 1185 int nvm_device_type;
@@ -1262,20 +1194,56 @@ struct iwl_priv {
1262 u16 beacon_int; 1194 u16 beacon_int;
1263 struct ieee80211_vif *vif; 1195 struct ieee80211_vif *vif;
1264 1196
1265 /*Added for 3945 */ 1197 union {
1266 void *shared_virt; 1198#if defined(CONFIG_IWL3945) || defined(CONFIG_IWL3945_MODULE)
1267 dma_addr_t shared_phys; 1199 struct {
1268 /*End*/ 1200 void *shared_virt;
1269 struct iwl_hw_params hw_params; 1201 dma_addr_t shared_phys;
1202
1203 struct delayed_work thermal_periodic;
1204 struct delayed_work rfkill_poll;
1205
1206 struct iwl3945_notif_statistics statistics;
1207
1208 u32 sta_supp_rates;
1209 int last_rx_rssi; /* From Rx packet statistics */
1210
1211 /* Rx'd packet timing information */
1212 u32 last_beacon_time;
1213 u64 last_tsf;
1214
1215 /*
1216 * each calibration channel group in the
1217 * EEPROM has a derived clip setting for
1218 * each rate.
1219 */
1220 const struct iwl3945_clip_group clip_groups[5];
1221
1222 } _3945;
1223#endif
1224#if defined(CONFIG_IWLAGN) || defined(CONFIG_IWLAGN_MODULE)
1225 struct {
1226 /* INT ICT Table */
1227 __le32 *ict_tbl;
1228 void *ict_tbl_vir;
1229 dma_addr_t ict_tbl_dma;
1230 dma_addr_t aligned_ict_tbl_dma;
1231 int ict_index;
1232 u32 inta;
1233 bool use_ict;
1234 /*
1235 * number of TIDs with aggregation enabled; 0 means
1236 * no aggregation
1237 */
1238 u8 agg_tids_count;
1239
1240 struct iwl_rx_phy_res last_phy_res;
1241 bool last_phy_res_valid;
1242 } _agn;
1243#endif
1244 };
1270 1245
1271 /* INT ICT Table */ 1246 struct iwl_hw_params hw_params;
1272 __le32 *ict_tbl;
1273 dma_addr_t ict_tbl_dma;
1274 dma_addr_t aligned_ict_tbl_dma;
1275 int ict_index;
1276 void *ict_tbl_vir;
1277 u32 inta;
1278 bool use_ict;
1279 1247
1280 u32 inta_mask; 1248 u32 inta_mask;
1281 /* Current association information needed to configure the 1249 /* Current association information needed to configure the
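
The union introduced above keeps per-flavour state in shared storage, so each build only pays for the hardware family it is compiled for. The pattern reduced to a toy (the config symbols and fields here are illustrative):

struct demo_priv {
        union {
#if defined(CONFIG_DEMO_A)
                struct {
                        void *shared_virt;      /* flavour-A-only state */
                } _a;
#endif
#if defined(CONFIG_DEMO_B)
                struct {
                        int ict_index;          /* flavour-B-only state */
                } _b;
#endif
        };
};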
@@ -1304,10 +1272,6 @@ struct iwl_priv {
1304 struct delayed_work alive_start; 1272 struct delayed_work alive_start;
1305 struct delayed_work scan_check; 1273 struct delayed_work scan_check;
1306 1274
1307 /*For 3945 only*/
1308 struct delayed_work thermal_periodic;
1309 struct delayed_work rfkill_poll;
1310
1311 /* TX Power */ 1275 /* TX Power */
1312 s8 tx_power_user_lmt; 1276 s8 tx_power_user_lmt;
1313 s8 tx_power_device_lmt; 1277 s8 tx_power_device_lmt;
@@ -1339,13 +1303,8 @@ struct iwl_priv {
1339 struct work_struct run_time_calib_work; 1303 struct work_struct run_time_calib_work;
1340 struct timer_list statistics_periodic; 1304 struct timer_list statistics_periodic;
1341 struct timer_list ucode_trace; 1305 struct timer_list ucode_trace;
1306 struct timer_list monitor_recover;
1342 bool hw_ready; 1307 bool hw_ready;
1343 /*For 3945*/
1344#define IWL_DEFAULT_TX_POWER 0x0F
1345
1346 struct iwl3945_notif_statistics statistics_39;
1347
1348 u32 sta_supp_rates;
1349 1308
1350 struct iwl_event_log event_log; 1309 struct iwl_event_log event_log;
1351}; /*iwl_priv */ 1310}; /*iwl_priv */
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 2ffc2edbf4f0..4a487639d932 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -37,6 +37,7 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite8);
37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32); 37EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32); 38EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx); 39EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_tx);
40EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event); 41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
41EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error); 42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
42EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event); 43EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 8171c701e4e1..ef0e3256eec2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -188,19 +188,19 @@ struct iwl_eeprom_enhanced_txpwr {
188/* 5000 regulatory - indirect access */ 188/* 5000 regulatory - indirect access */
189#define EEPROM_5000_REG_SKU_ID ((0x02)\ 189#define EEPROM_5000_REG_SKU_ID ((0x02)\
190 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */ 190 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 4 bytes */
191#define EEPROM_5000_REG_BAND_1_CHANNELS ((0x08)\ 191#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
192 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */ 192 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 28 bytes */
193#define EEPROM_5000_REG_BAND_2_CHANNELS ((0x26)\ 193#define EEPROM_REG_BAND_2_CHANNELS ((0x26)\
194 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */ 194 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 26 bytes */
195#define EEPROM_5000_REG_BAND_3_CHANNELS ((0x42)\ 195#define EEPROM_REG_BAND_3_CHANNELS ((0x42)\
196 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */ 196 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
197#define EEPROM_5000_REG_BAND_4_CHANNELS ((0x5C)\ 197#define EEPROM_REG_BAND_4_CHANNELS ((0x5C)\
198 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ 198 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
199#define EEPROM_5000_REG_BAND_5_CHANNELS ((0x74)\ 199#define EEPROM_REG_BAND_5_CHANNELS ((0x74)\
200 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */ 200 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 12 bytes */
201#define EEPROM_5000_REG_BAND_24_HT40_CHANNELS ((0x82)\ 201#define EEPROM_REG_BAND_24_HT40_CHANNELS ((0x82)\
202 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ 202 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
203#define EEPROM_5000_REG_BAND_52_HT40_CHANNELS ((0x92)\ 203#define EEPROM_REG_BAND_52_HT40_CHANNELS ((0x92)\
204 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */ 204 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 22 bytes */
205 205
206/* 6000 regulatory - indirect access */ 206/* 6000 regulatory - indirect access */
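
The renamed EEPROM_REG_* macros keep the indirect-access scheme: flag bits ORed into the offset tell the reader which section's base address to add before the lookup. A sketch of that resolution step, with illustrative flag values rather than the driver's real constants:

#include <stdint.h>

#define DEMO_INDIRECT_ADDRESS    0x00100000u
#define DEMO_INDIRECT_REGULATORY 0x00010000u /* selects the regulatory section */

static uint32_t demo_eeprom_resolve(uint32_t addr, uint32_t reg_base)
{
        if (addr & DEMO_INDIRECT_ADDRESS)
                return reg_base + (addr & 0xFFFF);  /* flags masked off */
        return addr;                                /* plain absolute offset */
}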
@@ -265,12 +265,15 @@ struct iwl_eeprom_enhanced_txpwr {
265#define EEPROM_5050_EEPROM_VERSION (0x21E) 265#define EEPROM_5050_EEPROM_VERSION (0x21E)
266 266
267/* 1000 Specific */ 267/* 1000 Specific */
268#define EEPROM_1000_TX_POWER_VERSION (4)
268#define EEPROM_1000_EEPROM_VERSION (0x15C) 269#define EEPROM_1000_EEPROM_VERSION (0x15C)
269 270
270/* 6x00 Specific */ 271/* 6x00 Specific */
272#define EEPROM_6000_TX_POWER_VERSION (4)
271#define EEPROM_6000_EEPROM_VERSION (0x434) 273#define EEPROM_6000_EEPROM_VERSION (0x434)
272 274
273/* 6x50 Specific */ 275/* 6x50 Specific */
276#define EEPROM_6050_TX_POWER_VERSION (4)
274#define EEPROM_6050_EEPROM_VERSION (0x532) 277#define EEPROM_6050_EEPROM_VERSION (0x532)
275 278
276/* OTP */ 279/* OTP */
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 73681c4fefe7..51f89e7ba681 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -169,7 +169,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
169 mutex_lock(&priv->sync_cmd_mutex); 169 mutex_lock(&priv->sync_cmd_mutex);
170 170
171 set_bit(STATUS_HCMD_ACTIVE, &priv->status); 171 set_bit(STATUS_HCMD_ACTIVE, &priv->status);
172 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n", 172 IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s\n",
173 get_cmd_string(cmd->id)); 173 get_cmd_string(cmd->id));
174 174
175 cmd_idx = iwl_enqueue_hcmd(priv, cmd); 175 cmd_idx = iwl_enqueue_hcmd(priv, cmd);
@@ -191,7 +191,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
191 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 191 jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
192 192
193 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 193 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
194 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n", 194 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
195 get_cmd_string(cmd->id)); 195 get_cmd_string(cmd->id));
196 ret = -ETIMEDOUT; 196 ret = -ETIMEDOUT;
197 goto cancel; 197 goto cancel;
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 51a67fb2e185..3ff6b9d25a10 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -31,6 +31,9 @@
31#define __iwl_helpers_h__ 31#define __iwl_helpers_h__
32 32
33#include <linux/ctype.h> 33#include <linux/ctype.h>
34#include <net/mac80211.h>
35
36#include "iwl-io.h"
34 37
35#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo)))) 38#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
36 39
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index 16eb3ced9b30..0203a3bbf872 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -298,7 +298,7 @@ static inline u32 __iwl_read_direct32(const char *f, u32 l,
298 struct iwl_priv *priv, u32 reg) 298 struct iwl_priv *priv, u32 reg)
299{ 299{
300 u32 value = _iwl_read_direct32(priv, reg); 300 u32 value = _iwl_read_direct32(priv, reg);
301 IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d \n", reg, value, 301 IWL_DEBUG_IO(priv, "read_direct32(0x%4X) = 0x%08x - %s %d\n", reg, value,
302 f, l); 302 f, l);
303 return value; 303 return value;
304} 304}
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index a6f9c918aabc..db5bfcb036ca 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -46,7 +46,7 @@
46static int led_mode; 46static int led_mode;
47module_param(led_mode, int, S_IRUGO); 47module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), " 48MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), "
49 "(default 0)\n"); 49 "(default 0)");
50 50
51 51
52static const struct { 52static const struct {
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 548dac2f6a96..581c683a8507 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -384,10 +384,10 @@ EXPORT_SYMBOL(iwl_ht_enabled);
384 384
385bool iwl_within_ct_kill_margin(struct iwl_priv *priv) 385bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
386{ 386{
387	s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */ 387	s32 temp = priv->temperature; /* degrees CELSIUS except where specified */
388 bool within_margin = false; 388 bool within_margin = false;
389 389
390 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) 390 if (priv->cfg->temperature_kelvin)
391 temp = KELVIN_TO_CELSIUS(priv->temperature); 391 temp = KELVIN_TO_CELSIUS(priv->temperature);
392 392
393 if (!priv->thermal_throttle.advanced_tt) 393 if (!priv->thermal_throttle.advanced_tt)
@@ -840,12 +840,12 @@ EXPORT_SYMBOL(iwl_tt_exit_ct_kill);
840static void iwl_bg_tt_work(struct work_struct *work) 840static void iwl_bg_tt_work(struct work_struct *work)
841{ 841{
842 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work); 842 struct iwl_priv *priv = container_of(work, struct iwl_priv, tt_work);
843	s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */ 843	s32 temp = priv->temperature; /* degrees CELSIUS except where specified */
844 844
845 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 845 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
846 return; 846 return;
847 847
848 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) 848 if (priv->cfg->temperature_kelvin)
849 temp = KELVIN_TO_CELSIUS(priv->temperature); 849 temp = KELVIN_TO_CELSIUS(priv->temperature);
850 850
851 if (!priv->thermal_throttle.advanced_tt) 851 if (!priv->thermal_throttle.advanced_tt)
@@ -875,7 +875,7 @@ void iwl_tt_initialize(struct iwl_priv *priv)
875 int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1); 875 int size = sizeof(struct iwl_tt_trans) * (IWL_TI_STATE_MAX - 1);
876 struct iwl_tt_trans *transaction; 876 struct iwl_tt_trans *transaction;
877 877
878 IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling \n"); 878 IWL_DEBUG_POWER(priv, "Initialize Thermal Throttling\n");
879 879
880 memset(tt, 0, sizeof(struct iwl_tt_mgmt)); 880 memset(tt, 0, sizeof(struct iwl_tt_mgmt));
881 881
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index d2d2a9174900..5944de7a98a2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -254,7 +254,7 @@
254 * device. A queue maps to only one (selectable by driver) Tx DMA channel, 254 * device. A queue maps to only one (selectable by driver) Tx DMA channel,
255 * but one DMA channel may take input from several queues. 255 * but one DMA channel may take input from several queues.
256 * 256 *
257 * Tx DMA channels have dedicated purposes. For 4965, they are used as follows 257 * Tx DMA FIFOs have dedicated purposes. For 4965, they are used as follows
258 * (cf. default_queue_to_tx_fifo in iwl-4965.c): 258 * (cf. default_queue_to_tx_fifo in iwl-4965.c):
259 * 259 *
260 * 0 -- EDCA BK (background) frames, lowest priority 260 * 0 -- EDCA BK (background) frames, lowest priority
@@ -262,20 +262,20 @@
262 * 2 -- EDCA VI (video) frames, higher priority 262 * 2 -- EDCA VI (video) frames, higher priority
263 * 3 -- EDCA VO (voice) and management frames, highest priority 263 * 3 -- EDCA VO (voice) and management frames, highest priority
264 * 4 -- Commands (e.g. RXON, etc.) 264 * 4 -- Commands (e.g. RXON, etc.)
265 * 5 -- HCCA short frames 265 * 5 -- unused (HCCA)
266 * 6 -- HCCA long frames 266 * 6 -- unused (HCCA)
267 * 7 -- not used by driver (device-internal only) 267 * 7 -- not used by driver (device-internal only)
268 * 268 *
269 * For 5000 series and up, they are used slightly differently 269 * For 5000 series and up, they are used differently
270 * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c): 270 * (cf. iwl5000_default_queue_to_tx_fifo in iwl-5000.c):
271 * 271 *
272 * 0 -- EDCA BK (background) frames, lowest priority 272 * 0 -- EDCA BK (background) frames, lowest priority
273 * 1 -- EDCA BE (best effort) frames, normal priority 273 * 1 -- EDCA BE (best effort) frames, normal priority
274 * 2 -- EDCA VI (video) frames, higher priority 274 * 2 -- EDCA VI (video) frames, higher priority
275 * 3 -- EDCA VO (voice) and management frames, highest priority 275 * 3 -- EDCA VO (voice) and management frames, highest priority
276 * 4 -- (TBD) 276 * 4 -- unused
277 * 5 -- HCCA short frames 277 * 5 -- unused
278 * 6 -- HCCA long frames 278 * 6 -- unused
279 * 7 -- Commands 279 * 7 -- Commands
280 * 280 *
281 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6. 281 * Driver should normally map queues 0-6 to Tx DMA/FIFO channels 0-6.
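The 5000-series table above reduces to a small lookup. A minimal standalone sketch of that mapping (the array name and purpose strings here are illustrative, not the driver's actual iwl5000_default_queue_to_tx_fifo[] definition):

    /* Illustrative sketch of the 5000-series Tx FIFO purposes listed above. */
    static const char *tx_fifo_purpose_5000[8] = {
        [0] = "EDCA BK (background), lowest priority",
        [1] = "EDCA BE (best effort), normal priority",
        [2] = "EDCA VI (video), higher priority",
        [3] = "EDCA VO (voice) and management, highest priority",
        [4] = "unused",
        [5] = "unused",
        [6] = "unused",
        [7] = "commands",
    };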
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index e5eb339107dd..267eb8935902 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -163,197 +163,6 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
163 spin_unlock_irqrestore(&q->lock, flags); 163 spin_unlock_irqrestore(&q->lock, flags);
164} 164}
165EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr); 165EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
166/**
167 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
168 */
169static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
170 dma_addr_t dma_addr)
171{
172 return cpu_to_le32((u32)(dma_addr >> 8));
173}
174
175/**
176 * iwl_rx_queue_restock - refill RX queue from pre-allocated pool
177 *
178 * If there are slots in the RX queue that need to be restocked,
179 * and we have free pre-allocated buffers, fill the ranks as much
180 * as we can, pulling from rx_free.
181 *
182 * This moves the 'write' index forward to catch up with 'processed', and
183 * also updates the memory address in the firmware to reference the new
184 * target buffer.
185 */
186void iwl_rx_queue_restock(struct iwl_priv *priv)
187{
188 struct iwl_rx_queue *rxq = &priv->rxq;
189 struct list_head *element;
190 struct iwl_rx_mem_buffer *rxb;
191 unsigned long flags;
192 int write;
193
194 spin_lock_irqsave(&rxq->lock, flags);
195 write = rxq->write & ~0x7;
196 while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
197 /* Get next free Rx buffer, remove from free list */
198 element = rxq->rx_free.next;
199 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
200 list_del(element);
201
202 /* Point to Rx buffer via next RBD in circular buffer */
203 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
204 rxq->queue[rxq->write] = rxb;
205 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
206 rxq->free_count--;
207 }
208 spin_unlock_irqrestore(&rxq->lock, flags);
209 /* If the pre-allocated buffer pool is dropping low, schedule to
210 * refill it */
211 if (rxq->free_count <= RX_LOW_WATERMARK)
212 queue_work(priv->workqueue, &priv->rx_replenish);
213
214
215 /* If we've added more space for the firmware to place data, tell it.
216 * Increment device's write pointer in multiples of 8. */
217 if (rxq->write_actual != (rxq->write & ~0x7)) {
218 spin_lock_irqsave(&rxq->lock, flags);
219 rxq->need_update = 1;
220 spin_unlock_irqrestore(&rxq->lock, flags);
221 iwl_rx_queue_update_write_ptr(priv, rxq);
222 }
223}
224EXPORT_SYMBOL(iwl_rx_queue_restock);
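A note on the restock path above: the device's write pointer is only advanced in whole groups of eight RBDs, which is what the '& ~0x7' masking implements. A minimal sketch, assuming nothing beyond the masking itself:

    /* Sketch: the hardware is only told about complete groups of 8 RBDs,
     * so the low three bits of the write index are masked off before it
     * is compared with the value last handed to the device. */
    static inline int rounded_write_ptr(int write)
    {
        return write & ~0x7;    /* e.g. write == 13 -> 8 */
    }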
225
226
227/**
228 * iwl_rx_replenish - Move all used packets from rx_used to rx_free
229 *
230 * When moving to rx_free an SKB is allocated for the slot.
231 *
232 * Also restock the Rx queue via iwl_rx_queue_restock.
233 * This is called as a scheduled work item (except during initialization)
234 */
235void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
236{
237 struct iwl_rx_queue *rxq = &priv->rxq;
238 struct list_head *element;
239 struct iwl_rx_mem_buffer *rxb;
240 struct page *page;
241 unsigned long flags;
242 gfp_t gfp_mask = priority;
243
244 while (1) {
245 spin_lock_irqsave(&rxq->lock, flags);
246 if (list_empty(&rxq->rx_used)) {
247 spin_unlock_irqrestore(&rxq->lock, flags);
248 return;
249 }
250 spin_unlock_irqrestore(&rxq->lock, flags);
251
252 if (rxq->free_count > RX_LOW_WATERMARK)
253 gfp_mask |= __GFP_NOWARN;
254
255 if (priv->hw_params.rx_page_order > 0)
256 gfp_mask |= __GFP_COMP;
257
258 /* Alloc a new receive buffer */
259 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
260 if (!page) {
261 if (net_ratelimit())
262 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
263 "order: %d\n",
264 priv->hw_params.rx_page_order);
265
266 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
267 net_ratelimit())
268 IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
269 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
270 rxq->free_count);
271 /* We don't reschedule replenish work here -- we will
272 * call the restock method and if it still needs
273 * more buffers it will schedule replenish */
274 return;
275 }
276
277 spin_lock_irqsave(&rxq->lock, flags);
278
279 if (list_empty(&rxq->rx_used)) {
280 spin_unlock_irqrestore(&rxq->lock, flags);
281 __free_pages(page, priv->hw_params.rx_page_order);
282 return;
283 }
284 element = rxq->rx_used.next;
285 rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
286 list_del(element);
287
288 spin_unlock_irqrestore(&rxq->lock, flags);
289
290 rxb->page = page;
291 /* Get physical address of the RB */
292 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
293 PAGE_SIZE << priv->hw_params.rx_page_order,
294 PCI_DMA_FROMDEVICE);
295 /* dma address must be no more than 36 bits */
296 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
297 /* and also 256 byte aligned! */
298 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
299
300 spin_lock_irqsave(&rxq->lock, flags);
301
302 list_add_tail(&rxb->list, &rxq->rx_free);
303 rxq->free_count++;
304 priv->alloc_rxb_page++;
305
306 spin_unlock_irqrestore(&rxq->lock, flags);
307 }
308}
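The two BUG_ON checks above encode the hardware's addressing constraints: the RBD stores the buffer address right-shifted by 8 (see iwl_dma_addr2rbd_ptr above), so the address must fit in 36 bits and be 256-byte aligned. A self-contained sketch of the same test, with DMA_BIT_MASK expanded as in linux/dma-mapping.h:

    #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

    /* Returns nonzero when 'dma_addr' is usable as an Rx buffer address:
     * it must fit in 36 bits and its low 8 bits must be zero. */
    static int rx_buffer_addr_ok(unsigned long long dma_addr)
    {
        return !(dma_addr & ~DMA_BIT_MASK(36)) &&  /* no more than 36 bits */
               !(dma_addr &  DMA_BIT_MASK(8));     /* 256-byte aligned */
    }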
309
310void iwl_rx_replenish(struct iwl_priv *priv)
311{
312 unsigned long flags;
313
314 iwl_rx_allocate(priv, GFP_KERNEL);
315
316 spin_lock_irqsave(&priv->lock, flags);
317 iwl_rx_queue_restock(priv);
318 spin_unlock_irqrestore(&priv->lock, flags);
319}
320EXPORT_SYMBOL(iwl_rx_replenish);
321
322void iwl_rx_replenish_now(struct iwl_priv *priv)
323{
324 iwl_rx_allocate(priv, GFP_ATOMIC);
325
326 iwl_rx_queue_restock(priv);
327}
328EXPORT_SYMBOL(iwl_rx_replenish_now);
329
330
331/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
332 * If an SKB has been detached, the pool entry needs to have its SKB set to NULL.
333 * This free routine walks the list of pool entries and, if the SKB is
334 * non-NULL, it is unmapped and freed.
335 */
336void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
337{
338 int i;
339 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
340 if (rxq->pool[i].page != NULL) {
341 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
342 PAGE_SIZE << priv->hw_params.rx_page_order,
343 PCI_DMA_FROMDEVICE);
344 __iwl_free_pages(priv, rxq->pool[i].page);
345 rxq->pool[i].page = NULL;
346 }
347 }
348
349 dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
350 rxq->dma_addr);
351 dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
352 rxq->rb_stts, rxq->rb_stts_dma);
353 rxq->bd = NULL;
354 rxq->rb_stts = NULL;
355}
356EXPORT_SYMBOL(iwl_rx_queue_free);
357 166
358int iwl_rx_queue_alloc(struct iwl_priv *priv) 167int iwl_rx_queue_alloc(struct iwl_priv *priv)
359{ 168{
@@ -396,98 +205,6 @@ err_bd:
396} 205}
397EXPORT_SYMBOL(iwl_rx_queue_alloc); 206EXPORT_SYMBOL(iwl_rx_queue_alloc);
398 207
399void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
400{
401 unsigned long flags;
402 int i;
403 spin_lock_irqsave(&rxq->lock, flags);
404 INIT_LIST_HEAD(&rxq->rx_free);
405 INIT_LIST_HEAD(&rxq->rx_used);
406 /* Fill the rx_used queue with _all_ of the Rx buffers */
407 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
408 /* In the reset function, these buffers may have been allocated
409 * to an SKB, so we need to unmap and free potential storage */
410 if (rxq->pool[i].page != NULL) {
411 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
412 PAGE_SIZE << priv->hw_params.rx_page_order,
413 PCI_DMA_FROMDEVICE);
414 __iwl_free_pages(priv, rxq->pool[i].page);
415 rxq->pool[i].page = NULL;
416 }
417 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
418 }
419
420 /* Set us so that we have processed and used all buffers, but have
421 * not restocked the Rx queue with fresh buffers */
422 rxq->read = rxq->write = 0;
423 rxq->write_actual = 0;
424 rxq->free_count = 0;
425 spin_unlock_irqrestore(&rxq->lock, flags);
426}
427
428int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
429{
430 u32 rb_size;
431 const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
432 u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
433
434 if (!priv->cfg->use_isr_legacy)
435 rb_timeout = RX_RB_TIMEOUT;
436
437 if (priv->cfg->mod_params->amsdu_size_8K)
438 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
439 else
440 rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
441
442 /* Stop Rx DMA */
443 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
444
445 /* Reset driver's Rx queue write index */
446 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
447
448 /* Tell device where to find RBD circular buffer in DRAM */
449 iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
450 (u32)(rxq->dma_addr >> 8));
451
452 /* Tell device where in DRAM to update its Rx status */
453 iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
454 rxq->rb_stts_dma >> 4);
455
456 /* Enable Rx DMA
457 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
458 * the credit mechanism in 5000 HW RX FIFO
459 * Direct rx interrupts to hosts
460 * Rx buffer size 4 or 8k
461 * RB timeout 0x10
462 * 256 RBDs
463 */
464 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
465 FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
466 FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
467 FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
468 FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
469 rb_size|
470 (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
471 (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
472
473 /* Set interrupt coalescing timer to default (2048 usecs) */
474 iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
475
476 return 0;
477}
478
479int iwl_rxq_stop(struct iwl_priv *priv)
480{
481
482 /* stop Rx DMA */
483 iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
484 iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
485 FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
486
487 return 0;
488}
489EXPORT_SYMBOL(iwl_rxq_stop);
490
491void iwl_rx_missed_beacon_notif(struct iwl_priv *priv, 208void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
492 struct iwl_rx_mem_buffer *rxb) 209 struct iwl_rx_mem_buffer *rxb)
493 210
@@ -543,6 +260,7 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
543 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER; 260 le32_to_cpu(rx_info->beacon_silence_rssi_b) & IN_BAND_FILTER;
544 int bcn_silence_c = 261 int bcn_silence_c =
545 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER; 262 le32_to_cpu(rx_info->beacon_silence_rssi_c) & IN_BAND_FILTER;
263 int last_rx_noise;
546 264
547 if (bcn_silence_a) { 265 if (bcn_silence_a) {
548 total_silence += bcn_silence_a; 266 total_silence += bcn_silence_a;
@@ -559,13 +277,13 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
559 277
560 /* Average among active antennas */ 278 /* Average among active antennas */
561 if (num_active_rx) 279 if (num_active_rx)
562 priv->last_rx_noise = (total_silence / num_active_rx) - 107; 280 last_rx_noise = (total_silence / num_active_rx) - 107;
563 else 281 else
564 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE; 282 last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
565 283
566 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n", 284 IWL_DEBUG_CALIB(priv, "inband silence a %u, b %u, c %u, dBm %d\n",
567 bcn_silence_a, bcn_silence_b, bcn_silence_c, 285 bcn_silence_a, bcn_silence_b, bcn_silence_c,
568 priv->last_rx_noise); 286 last_rx_noise);
569} 287}
570 288
571#ifdef CONFIG_IWLWIFI_DEBUG 289#ifdef CONFIG_IWLWIFI_DEBUG
@@ -617,29 +335,20 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
617 335
618#define REG_RECALIB_PERIOD (60) 336#define REG_RECALIB_PERIOD (60)
619 337
620#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n" 338/**
621void iwl_rx_statistics(struct iwl_priv *priv, 339 * iwl_good_plcp_health - checks for PLCP errors.
622 struct iwl_rx_mem_buffer *rxb) 340 *
341 * When the PLCP error rate exceeds the threshold, reset the radio
342 * to improve throughput.
343 */
344bool iwl_good_plcp_health(struct iwl_priv *priv,
345 struct iwl_rx_packet *pkt)
623{ 346{
624 int change; 347 bool rc = true;
625 struct iwl_rx_packet *pkt = rxb_addr(rxb);
626 int combined_plcp_delta; 348 int combined_plcp_delta;
627 unsigned int plcp_msec; 349 unsigned int plcp_msec;
628 unsigned long plcp_received_jiffies; 350 unsigned long plcp_received_jiffies;
629 351
630 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
631 (int)sizeof(priv->statistics),
632 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
633
634 change = ((priv->statistics.general.temperature !=
635 pkt->u.stats.general.temperature) ||
636 ((priv->statistics.flag &
637 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
638 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
639
640#ifdef CONFIG_IWLWIFI_DEBUG
641 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
642#endif
643 /* 352 /*
644 * check for plcp_err and trigger radio reset if it exceeds 353 * check for plcp_err and trigger radio reset if it exceeds
645 * the plcp error threshold plcp_delta. 354 * the plcp error threshold plcp_delta.
@@ -660,11 +369,11 @@ void iwl_rx_statistics(struct iwl_priv *priv,
660 le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err)); 369 le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
661 370
662 if ((combined_plcp_delta > 0) && 371 if ((combined_plcp_delta > 0) &&
663 ((combined_plcp_delta * 100) / plcp_msec) > 372 ((combined_plcp_delta * 100) / plcp_msec) >
664 priv->cfg->plcp_delta_threshold) { 373 priv->cfg->plcp_delta_threshold) {
665 /* 374 /*
666			 * if plcp_err exceed the threshold, the following 375			 * if plcp_err exceeds the threshold,
667 * data is printed in csv format: 376 * the following data is printed in csv format:
668 * Text: plcp_err exceeded %d, 377 * Text: plcp_err exceeded %d,
669 * Received ofdm.plcp_err, 378 * Received ofdm.plcp_err,
670 * Current ofdm.plcp_err, 379 * Current ofdm.plcp_err,
@@ -673,22 +382,73 @@ void iwl_rx_statistics(struct iwl_priv *priv,
673 * combined_plcp_delta, 382 * combined_plcp_delta,
674 * plcp_msec 383 * plcp_msec
675 */ 384 */
676 IWL_DEBUG_RADIO(priv, PLCP_MSG, 385 IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, "
386 "%u, %u, %u, %u, %d, %u mSecs\n",
677 priv->cfg->plcp_delta_threshold, 387 priv->cfg->plcp_delta_threshold,
678 le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err), 388 le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
679 le32_to_cpu(priv->statistics.rx.ofdm.plcp_err), 389 le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
680 le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err), 390 le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
681 le32_to_cpu( 391 le32_to_cpu(
682 priv->statistics.rx.ofdm_ht.plcp_err), 392 priv->statistics.rx.ofdm_ht.plcp_err),
683 combined_plcp_delta, plcp_msec); 393 combined_plcp_delta, plcp_msec);
394 rc = false;
395 }
396 }
397 return rc;
398}
399EXPORT_SYMBOL(iwl_good_plcp_health);
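The test above normalizes the error growth to errors per 100 ms before comparing it against cfg->plcp_delta_threshold. A worked sketch of the same arithmetic (the threshold value 50 in the comment is a made-up example, not a real configuration default; msec is assumed nonzero, as in the elapsed-time computation above):

    /* Returns nonzero (healthy) unless 'delta' new PLCP errors over 'msec'
     * milliseconds exceed 'threshold' errors per 100 ms. */
    static int plcp_healthy(unsigned int delta, unsigned int msec,
                            unsigned int threshold)
    {
        return !(delta > 0 && (delta * 100) / msec > threshold);
    }

    /* plcp_healthy(120, 200, 50): (120 * 100) / 200 == 60 > 50 -> unhealthy */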
684 400
685 /* 401static void iwl_recover_from_statistics(struct iwl_priv *priv,
686 * Reset the RF radio due to the high plcp 402 struct iwl_rx_packet *pkt)
687 * error rate 403{
688 */ 404 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
689 iwl_force_reset(priv, IWL_RF_RESET); 405 return;
406 if (iwl_is_associated(priv)) {
407 if (priv->cfg->ops->lib->check_ack_health) {
408 if (!priv->cfg->ops->lib->check_ack_health(
409 priv, pkt)) {
410 /*
411 * low ack count detected
412 * restart Firmware
413 */
414 IWL_ERR(priv, "low ack count detected, "
415 "restart firmware\n");
416 iwl_force_reset(priv, IWL_FW_RESET);
417 }
418 } else if (priv->cfg->ops->lib->check_plcp_health) {
419 if (!priv->cfg->ops->lib->check_plcp_health(
420 priv, pkt)) {
421 /*
422 * high plcp error detected
423 * reset Radio
424 */
425 iwl_force_reset(priv, IWL_RF_RESET);
426 }
690 } 427 }
691 } 428 }
429}
430
431void iwl_rx_statistics(struct iwl_priv *priv,
432 struct iwl_rx_mem_buffer *rxb)
433{
434 int change;
435 struct iwl_rx_packet *pkt = rxb_addr(rxb);
436
437
438 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
439 (int)sizeof(priv->statistics),
440 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
441
442 change = ((priv->statistics.general.temperature !=
443 pkt->u.stats.general.temperature) ||
444 ((priv->statistics.flag &
445 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
446 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
447
448#ifdef CONFIG_IWLWIFI_DEBUG
449 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
450#endif
451 iwl_recover_from_statistics(priv, pkt);
692 452
693 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); 453 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
694 454
@@ -731,139 +491,6 @@ void iwl_reply_statistics(struct iwl_priv *priv,
731} 491}
732EXPORT_SYMBOL(iwl_reply_statistics); 492EXPORT_SYMBOL(iwl_reply_statistics);
733 493
734/* Calc max signal level (dBm) among 3 possible receivers */
735static inline int iwl_calc_rssi(struct iwl_priv *priv,
736 struct iwl_rx_phy_res *rx_resp)
737{
738 return priv->cfg->ops->utils->calc_rssi(priv, rx_resp);
739}
740
741#ifdef CONFIG_IWLWIFI_DEBUG
742/**
743 * iwl_dbg_report_frame - dump frame to syslog during debug sessions
744 *
745 * You may hack this function to show different aspects of received frames,
746 * including selective frame dumps.
747 * group100 parameter selects whether to show 1 out of 100 good data frames.
748 * All beacon and probe response frames are printed.
749 */
750static void iwl_dbg_report_frame(struct iwl_priv *priv,
751 struct iwl_rx_phy_res *phy_res, u16 length,
752 struct ieee80211_hdr *header, int group100)
753{
754 u32 to_us;
755 u32 print_summary = 0;
756 u32 print_dump = 0; /* set to 1 to dump all frames' contents */
757 u32 hundred = 0;
758 u32 dataframe = 0;
759 __le16 fc;
760 u16 seq_ctl;
761 u16 channel;
762 u16 phy_flags;
763 u32 rate_n_flags;
764 u32 tsf_low;
765 int rssi;
766
767 if (likely(!(iwl_get_debug_level(priv) & IWL_DL_RX)))
768 return;
769
770 /* MAC header */
771 fc = header->frame_control;
772 seq_ctl = le16_to_cpu(header->seq_ctrl);
773
774 /* metadata */
775 channel = le16_to_cpu(phy_res->channel);
776 phy_flags = le16_to_cpu(phy_res->phy_flags);
777 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
778
779 /* signal statistics */
780 rssi = iwl_calc_rssi(priv, phy_res);
781 tsf_low = le64_to_cpu(phy_res->timestamp) & 0x0ffffffff;
782
783 to_us = !compare_ether_addr(header->addr1, priv->mac_addr);
784
785 /* if data frame is to us and all is good,
786 * (optionally) print summary for only 1 out of every 100 */
787 if (to_us && (fc & ~cpu_to_le16(IEEE80211_FCTL_PROTECTED)) ==
788 cpu_to_le16(IEEE80211_FCTL_FROMDS | IEEE80211_FTYPE_DATA)) {
789 dataframe = 1;
790 if (!group100)
791 print_summary = 1; /* print each frame */
792 else if (priv->framecnt_to_us < 100) {
793 priv->framecnt_to_us++;
794 print_summary = 0;
795 } else {
796 priv->framecnt_to_us = 0;
797 print_summary = 1;
798 hundred = 1;
799 }
800 } else {
801 /* print summary for all other frames */
802 print_summary = 1;
803 }
804
805 if (print_summary) {
806 char *title;
807 int rate_idx;
808 u32 bitrate;
809
810 if (hundred)
811 title = "100Frames";
812 else if (ieee80211_has_retry(fc))
813 title = "Retry";
814 else if (ieee80211_is_assoc_resp(fc))
815 title = "AscRsp";
816 else if (ieee80211_is_reassoc_resp(fc))
817 title = "RasRsp";
818 else if (ieee80211_is_probe_resp(fc)) {
819 title = "PrbRsp";
820 print_dump = 1; /* dump frame contents */
821 } else if (ieee80211_is_beacon(fc)) {
822 title = "Beacon";
823 print_dump = 1; /* dump frame contents */
824 } else if (ieee80211_is_atim(fc))
825 title = "ATIM";
826 else if (ieee80211_is_auth(fc))
827 title = "Auth";
828 else if (ieee80211_is_deauth(fc))
829 title = "DeAuth";
830 else if (ieee80211_is_disassoc(fc))
831 title = "DisAssoc";
832 else
833 title = "Frame";
834
835 rate_idx = iwl_hwrate_to_plcp_idx(rate_n_flags);
836 if (unlikely((rate_idx < 0) || (rate_idx >= IWL_RATE_COUNT))) {
837 bitrate = 0;
838 WARN_ON_ONCE(1);
839 } else {
840 bitrate = iwl_rates[rate_idx].ieee / 2;
841 }
842
843 /* print frame summary.
844 * MAC addresses show just the last byte (for brevity),
845 * but you can hack it to show more, if you'd like to. */
846 if (dataframe)
847 IWL_DEBUG_RX(priv, "%s: mhd=0x%04x, dst=0x%02x, "
848 "len=%u, rssi=%d, chnl=%d, rate=%u, \n",
849 title, le16_to_cpu(fc), header->addr1[5],
850 length, rssi, channel, bitrate);
851 else {
852 /* src/dst addresses assume managed mode */
853 IWL_DEBUG_RX(priv, "%s: 0x%04x, dst=0x%02x, src=0x%02x, "
854 "len=%u, rssi=%d, tim=%lu usec, "
855 "phy=0x%02x, chnl=%d\n",
856 title, le16_to_cpu(fc), header->addr1[5],
857 header->addr3[5], length, rssi,
858 tsf_low - priv->scan_start_tsf,
859 phy_flags, channel);
860 }
861 }
862 if (print_dump)
863 iwl_print_hex_dump(priv, IWL_DL_RX, header, length);
864}
865#endif
866
867/* 494/*
868 * returns non-zero if packet should be dropped 495 * returns non-zero if packet should be dropped
869 */ 496 */
@@ -911,305 +538,3 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
911 return 0; 538 return 0;
912} 539}
913EXPORT_SYMBOL(iwl_set_decrypted_flag); 540EXPORT_SYMBOL(iwl_set_decrypted_flag);
914
915static u32 iwl_translate_rx_status(struct iwl_priv *priv, u32 decrypt_in)
916{
917 u32 decrypt_out = 0;
918
919 if ((decrypt_in & RX_RES_STATUS_STATION_FOUND) ==
920 RX_RES_STATUS_STATION_FOUND)
921 decrypt_out |= (RX_RES_STATUS_STATION_FOUND |
922 RX_RES_STATUS_NO_STATION_INFO_MISMATCH);
923
924 decrypt_out |= (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK);
925
926 /* packet was not encrypted */
927 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
928 RX_RES_STATUS_SEC_TYPE_NONE)
929 return decrypt_out;
930
931 /* packet was encrypted with unknown alg */
932 if ((decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) ==
933 RX_RES_STATUS_SEC_TYPE_ERR)
934 return decrypt_out;
935
936 /* decryption was not done in HW */
937 if ((decrypt_in & RX_MPDU_RES_STATUS_DEC_DONE_MSK) !=
938 RX_MPDU_RES_STATUS_DEC_DONE_MSK)
939 return decrypt_out;
940
941 switch (decrypt_in & RX_RES_STATUS_SEC_TYPE_MSK) {
942
943 case RX_RES_STATUS_SEC_TYPE_CCMP:
944 /* alg is CCM: check MIC only */
945 if (!(decrypt_in & RX_MPDU_RES_STATUS_MIC_OK))
946 /* Bad MIC */
947 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
948 else
949 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
950
951 break;
952
953 case RX_RES_STATUS_SEC_TYPE_TKIP:
954 if (!(decrypt_in & RX_MPDU_RES_STATUS_TTAK_OK)) {
955 /* Bad TTAK */
956 decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK;
957 break;
958 }
959 /* fall through if TTAK OK */
960 default:
961 if (!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK))
962 decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC;
963 else
964 decrypt_out |= RX_RES_STATUS_DECRYPT_OK;
965 break;
966	}
967
968 IWL_DEBUG_RX(priv, "decrypt_in:0x%x decrypt_out = 0x%x\n",
969 decrypt_in, decrypt_out);
970
971 return decrypt_out;
972}
973
974static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
975 struct ieee80211_hdr *hdr,
976 u16 len,
977 u32 ampdu_status,
978 struct iwl_rx_mem_buffer *rxb,
979 struct ieee80211_rx_status *stats)
980{
981 struct sk_buff *skb;
982 int ret = 0;
983 __le16 fc = hdr->frame_control;
984
985 /* We only process data packets if the interface is open */
986 if (unlikely(!priv->is_open)) {
987 IWL_DEBUG_DROP_LIMIT(priv,
988 "Dropping packet while interface is not open.\n");
989 return;
990 }
991
992 /* In case of HW accelerated crypto and bad decryption, drop */
993 if (!priv->cfg->mod_params->sw_crypto &&
994 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
995 return;
996
997 skb = alloc_skb(IWL_LINK_HDR_MAX * 2, GFP_ATOMIC);
998 if (!skb) {
999 IWL_ERR(priv, "alloc_skb failed\n");
1000 return;
1001 }
1002
1003 skb_reserve(skb, IWL_LINK_HDR_MAX);
1004 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
1005
1006	/* mac80211 currently doesn't support paged SKBs. Convert to a linear
1007	 * SKB for management frames, and for data frames that require
1008	 * software decryption or software defragmentation. */
1009 if (ieee80211_is_mgmt(fc) ||
1010 ieee80211_has_protected(fc) ||
1011 ieee80211_has_morefrags(fc) ||
1012 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG ||
1013 (ieee80211_is_data_qos(fc) &&
1014 *ieee80211_get_qos_ctl(hdr) &
1015 IEEE80211_QOS_CONTROL_A_MSDU_PRESENT))
1016 ret = skb_linearize(skb);
1017 else
1018 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
1019 0 : -ENOMEM;
1020
1021 if (ret) {
1022 kfree_skb(skb);
1023 goto out;
1024 }
1025
1026 /*
1027 * XXX: We cannot touch the page and its virtual memory (hdr) after
1028 * here. It might have already been freed by the above skb change.
1029 */
1030
1031 iwl_update_stats(priv, false, fc, len);
1032 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
1033
1034 ieee80211_rx(priv->hw, skb);
1035 out:
1036 priv->alloc_rxb_page--;
1037 rxb->page = NULL;
1038}
1039
1040/* This is necessary only for a number of statistics, see the caller. */
1041static int iwl_is_network_packet(struct iwl_priv *priv,
1042 struct ieee80211_hdr *header)
1043{
1044 /* Filter incoming packets to determine if they are targeted toward
1045 * this network, discarding packets coming from ourselves */
1046 switch (priv->iw_mode) {
1047 case NL80211_IFTYPE_ADHOC: /* Header: Dest. | Source | BSSID */
1048 /* packets to our IBSS update information */
1049 return !compare_ether_addr(header->addr3, priv->bssid);
1050 case NL80211_IFTYPE_STATION: /* Header: Dest. | AP{BSSID} | Source */
1051		/* packets from our AP update information */
1052 return !compare_ether_addr(header->addr2, priv->bssid);
1053 default:
1054 return 1;
1055 }
1056}
1057
1058/* Called for REPLY_RX (legacy ABG frames), or
1059 * REPLY_RX_MPDU_CMD (HT high-throughput N frames). */
1060void iwl_rx_reply_rx(struct iwl_priv *priv,
1061 struct iwl_rx_mem_buffer *rxb)
1062{
1063 struct ieee80211_hdr *header;
1064 struct ieee80211_rx_status rx_status;
1065 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1066 struct iwl_rx_phy_res *phy_res;
1067 __le32 rx_pkt_status;
1068 struct iwl4965_rx_mpdu_res_start *amsdu;
1069 u32 len;
1070 u32 ampdu_status;
1071 u32 rate_n_flags;
1072
1073 /**
1074 * REPLY_RX and REPLY_RX_MPDU_CMD are handled differently.
1075 * REPLY_RX: physical layer info is in this buffer
1076 * REPLY_RX_MPDU_CMD: physical layer info was sent in separate
1077 * command and cached in priv->last_phy_res
1078 *
1079 * Here we set up local variables depending on which command is
1080 * received.
1081 */
1082 if (pkt->hdr.cmd == REPLY_RX) {
1083 phy_res = (struct iwl_rx_phy_res *)pkt->u.raw;
1084 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*phy_res)
1085 + phy_res->cfg_phy_cnt);
1086
1087 len = le16_to_cpu(phy_res->byte_count);
1088 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*phy_res) +
1089 phy_res->cfg_phy_cnt + len);
1090 ampdu_status = le32_to_cpu(rx_pkt_status);
1091 } else {
1092 if (!priv->last_phy_res[0]) {
1093 IWL_ERR(priv, "MPDU frame without cached PHY data\n");
1094 return;
1095 }
1096 phy_res = (struct iwl_rx_phy_res *)&priv->last_phy_res[1];
1097 amsdu = (struct iwl4965_rx_mpdu_res_start *)pkt->u.raw;
1098 header = (struct ieee80211_hdr *)(pkt->u.raw + sizeof(*amsdu));
1099 len = le16_to_cpu(amsdu->byte_count);
1100 rx_pkt_status = *(__le32 *)(pkt->u.raw + sizeof(*amsdu) + len);
1101 ampdu_status = iwl_translate_rx_status(priv,
1102 le32_to_cpu(rx_pkt_status));
1103 }
1104
1105 if ((unlikely(phy_res->cfg_phy_cnt > 20))) {
1106		IWL_DEBUG_DROP(priv, "dsp size out of range [0,20]: %d\n",
1107 phy_res->cfg_phy_cnt);
1108 return;
1109 }
1110
1111 if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) ||
1112 !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) {
1113 IWL_DEBUG_RX(priv, "Bad CRC or FIFO: 0x%08X.\n",
1114 le32_to_cpu(rx_pkt_status));
1115 return;
1116 }
1117
1118 /* This will be used in several places later */
1119 rate_n_flags = le32_to_cpu(phy_res->rate_n_flags);
1120
1121 /* rx_status carries information about the packet to mac80211 */
1122 rx_status.mactime = le64_to_cpu(phy_res->timestamp);
1123 rx_status.freq =
1124 ieee80211_channel_to_frequency(le16_to_cpu(phy_res->channel));
1125 rx_status.band = (phy_res->phy_flags & RX_RES_PHY_FLAGS_BAND_24_MSK) ?
1126 IEEE80211_BAND_2GHZ : IEEE80211_BAND_5GHZ;
1127 rx_status.rate_idx =
1128 iwl_hwrate_to_mac80211_idx(rate_n_flags, rx_status.band);
1129 rx_status.flag = 0;
1130
1131 /* TSF isn't reliable. In order to allow smooth user experience,
1132	 * this workaround doesn't propagate it to mac80211 */
1133 /*rx_status.flag |= RX_FLAG_TSFT;*/
1134
1135 priv->ucode_beacon_time = le32_to_cpu(phy_res->beacon_time_stamp);
1136
1137 /* Find max signal strength (dBm) among 3 antenna/receiver chains */
1138 rx_status.signal = iwl_calc_rssi(priv, phy_res);
1139
1140 /* Meaningful noise values are available only from beacon statistics,
1141 * which are gathered only when associated, and indicate noise
1142 * only for the associated network channel ...
1143 * Ignore these noise values while scanning (other channels) */
1144 if (iwl_is_associated(priv) &&
1145 !test_bit(STATUS_SCANNING, &priv->status)) {
1146 rx_status.noise = priv->last_rx_noise;
1147 } else {
1148 rx_status.noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
1149 }
1150
1151 /* Reset beacon noise level if not associated. */
1152 if (!iwl_is_associated(priv))
1153 priv->last_rx_noise = IWL_NOISE_MEAS_NOT_AVAILABLE;
1154
1155#ifdef CONFIG_IWLWIFI_DEBUG
1156 /* Set "1" to report good data frames in groups of 100 */
1157 if (unlikely(iwl_get_debug_level(priv) & IWL_DL_RX))
1158 iwl_dbg_report_frame(priv, phy_res, len, header, 1);
1159#endif
1160 iwl_dbg_log_rx_data_frame(priv, len, header);
1161 IWL_DEBUG_STATS_LIMIT(priv, "Rssi %d, noise %d, TSF %llu\n",
1162 rx_status.signal, rx_status.noise,
1163 (unsigned long long)rx_status.mactime);
1164
1165 /*
1166 * "antenna number"
1167 *
1168 * It seems that the antenna field in the phy flags value
1169 * is actually a bit field. This is undefined by radiotap,
1170 * it wants an actual antenna number but I always get "7"
1171 * for most legacy frames I receive indicating that the
1172 * same frame was received on all three RX chains.
1173 *
1174 * I think this field should be removed in favor of a
1175 * new 802.11n radiotap field "RX chains" that is defined
1176 * as a bitmask.
1177 */
1178 rx_status.antenna =
1179 (le16_to_cpu(phy_res->phy_flags) & RX_RES_PHY_FLAGS_ANTENNA_MSK)
1180 >> RX_RES_PHY_FLAGS_ANTENNA_POS;
1181
1182 /* set the preamble flag if appropriate */
1183 if (phy_res->phy_flags & RX_RES_PHY_FLAGS_SHORT_PREAMBLE_MSK)
1184 rx_status.flag |= RX_FLAG_SHORTPRE;
1185
1186 /* Set up the HT phy flags */
1187 if (rate_n_flags & RATE_MCS_HT_MSK)
1188 rx_status.flag |= RX_FLAG_HT;
1189 if (rate_n_flags & RATE_MCS_HT40_MSK)
1190 rx_status.flag |= RX_FLAG_40MHZ;
1191 if (rate_n_flags & RATE_MCS_SGI_MSK)
1192 rx_status.flag |= RX_FLAG_SHORT_GI;
1193
1194 if (iwl_is_network_packet(priv, header)) {
1195 priv->last_rx_rssi = rx_status.signal;
1196 priv->last_beacon_time = priv->ucode_beacon_time;
1197 priv->last_tsf = le64_to_cpu(phy_res->timestamp);
1198 }
1199
1200 iwl_pass_packet_to_mac80211(priv, header, len, ampdu_status,
1201 rxb, &rx_status);
1202}
1203EXPORT_SYMBOL(iwl_rx_reply_rx);
1204
1205/* Cache phy data (Rx signal strength, etc) for HT frame (REPLY_RX_PHY_CMD).
1206 * This will be used later in iwl_rx_reply_rx() for REPLY_RX_MPDU_CMD. */
1207void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
1208 struct iwl_rx_mem_buffer *rxb)
1209{
1210 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1211 priv->last_phy_res[0] = 1;
1212 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
1213 sizeof(struct iwl_rx_phy_res));
1214}
1215EXPORT_SYMBOL(iwl_rx_reply_rx_phy);
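The pairing removed here (PHY data cached by REPLY_RX_PHY_CMD, consumed by a later REPLY_RX_MPDU_CMD) comes down to a validity flag plus a copy. A reduced sketch with simplified stand-in types (phy_cache is not a real driver structure):

    #include <string.h>

    struct phy_cache {
        int  valid;
        char res[64];   /* stand-in for struct iwl_rx_phy_res */
    };

    /* REPLY_RX_PHY_CMD handler: remember the PHY data for the next MPDU */
    static void on_reply_rx_phy(struct phy_cache *c, const char *raw)
    {
        c->valid = 1;
        memcpy(c->res, raw, sizeof(c->res));
    }

    /* REPLY_RX_MPDU_CMD handler: refuse to run without cached PHY data
     * (the "MPDU frame without cached PHY data" error above) */
    static int on_reply_rx_mpdu(const struct phy_cache *c)
    {
        return c->valid ? 0 : -1;
    }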
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 12e455a4b90e..de0446d4bfe9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -454,7 +454,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
454 added++; 454 added++;
455 } 455 }
456 456
457 IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added); 457 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
458 return added; 458 return added;
459} 459}
460 460
@@ -589,7 +589,6 @@ static void iwl_bg_start_internal_scan(struct work_struct *work)
589 unlock: 589 unlock:
590 mutex_unlock(&priv->mutex); 590 mutex_unlock(&priv->mutex);
591} 591}
592EXPORT_SYMBOL(iwl_internal_short_hw_scan);
593 592
594#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ) 593#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
595 594
@@ -674,7 +673,6 @@ static void iwl_bg_request_scan(struct work_struct *data)
674 }; 673 };
675 struct iwl_scan_cmd *scan; 674 struct iwl_scan_cmd *scan;
676 struct ieee80211_conf *conf = NULL; 675 struct ieee80211_conf *conf = NULL;
677 int ret = 0;
678 u32 rate_flags = 0; 676 u32 rate_flags = 0;
679 u16 cmd_len; 677 u16 cmd_len;
680 u16 rx_chain = 0; 678 u16 rx_chain = 0;
@@ -707,7 +705,6 @@ static void iwl_bg_request_scan(struct work_struct *data)
707 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 705 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
708 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. " 706 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests in parallel. "
709 "Ignoring second request.\n"); 707 "Ignoring second request.\n");
710 ret = -EIO;
711 goto done; 708 goto done;
712 } 709 }
713 710
@@ -740,7 +737,8 @@ static void iwl_bg_request_scan(struct work_struct *data)
740 priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) + 737 priv->scan = kmalloc(sizeof(struct iwl_scan_cmd) +
741 IWL_MAX_SCAN_SIZE, GFP_KERNEL); 738 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
742 if (!priv->scan) { 739 if (!priv->scan) {
743 ret = -ENOMEM; 740 IWL_DEBUG_SCAN(priv,
741 "fail to allocate memory for scan\n");
744 goto done; 742 goto done;
745 } 743 }
746 } 744 }
@@ -824,10 +822,11 @@ static void iwl_bg_request_scan(struct work_struct *data)
824 */ 822 */
825 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0; 823 scan->good_CRC_th = is_active ? IWL_GOOD_CRC_TH : 0;
826 824
827 /* Force use of chains B and C (0x6) for scan Rx for 4965 825 /* Force use of chains B and C (0x6) for scan Rx
828		 * Avoid A (0x1) because of its off-channel reception on A-band. 826		 * Avoid A (0x1) because the device has off-channel reception
827 * on A-band.
829 */ 828 */
830 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) 829 if (priv->cfg->off_channel_workaround)
831 rx_ant = ANT_BC; 830 rx_ant = ANT_BC;
832 } else { 831 } else {
833 IWL_WARN(priv, "Invalid scan band count\n"); 832 IWL_WARN(priv, "Invalid scan band count\n");
@@ -901,8 +900,7 @@ static void iwl_bg_request_scan(struct work_struct *data)
901 scan->len = cpu_to_le16(cmd.len); 900 scan->len = cpu_to_le16(cmd.len);
902 901
903 set_bit(STATUS_SCAN_HW, &priv->status); 902 set_bit(STATUS_SCAN_HW, &priv->status);
904 ret = iwl_send_cmd_sync(priv, &cmd); 903 if (iwl_send_cmd_sync(priv, &cmd))
905 if (ret)
906 goto done; 904 goto done;
907 905
908 queue_delayed_work(priv->workqueue, &priv->scan_check, 906 queue_delayed_work(priv->workqueue, &priv->scan_check,
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 4a6686fa6b36..d86ecd2f9ec2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -29,14 +29,12 @@
29 29
30#include <net/mac80211.h> 30#include <net/mac80211.h>
31#include <linux/etherdevice.h> 31#include <linux/etherdevice.h>
32#include <linux/sched.h>
32 33
33#include "iwl-dev.h" 34#include "iwl-dev.h"
34#include "iwl-core.h" 35#include "iwl-core.h"
35#include "iwl-sta.h" 36#include "iwl-sta.h"
36 37
37#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
38#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
39
40u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr) 38u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
41{ 39{
42 int i; 40 int i;
@@ -64,6 +62,19 @@ u8 iwl_find_station(struct iwl_priv *priv, const u8 *addr)
64 addr, priv->num_stations); 62 addr, priv->num_stations);
65 63
66 out: 64 out:
65 /*
66 * It may be possible that more commands interacting with stations
67	 * arrive before we have completed processing the addition of the
68	 * station.
69 */
70 if (ret != IWL_INVALID_STATION &&
71 (!(priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) ||
72 ((priv->stations[ret].used & IWL_STA_UCODE_ACTIVE) &&
73 (priv->stations[ret].used & IWL_STA_UCODE_INPROGRESS)))) {
74 IWL_ERR(priv, "Requested station info for sta %d before ready.\n",
75 ret);
76 ret = IWL_INVALID_STATION;
77 }
67 spin_unlock_irqrestore(&priv->sta_lock, flags); 78 spin_unlock_irqrestore(&priv->sta_lock, flags);
68 return ret; 79 return ret;
69} 80}
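The readiness test added above collapses to: a station entry is usable only when the uCode knows about it and no add is still in flight. A sketch of the same predicate (the bit values are illustrative, mirroring the IWL_STA_* flags):

    #define STA_UCODE_ACTIVE      (1 << 1)   /* illustrative bit values */
    #define STA_UCODE_INPROGRESS  (1 << 2)

    /* Equivalent to the check above: ready iff active and not mid-add */
    static int sta_ready(unsigned int used)
    {
        return (used & STA_UCODE_ACTIVE) && !(used & STA_UCODE_INPROGRESS);
    }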
@@ -132,7 +143,7 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
132 sta_id); 143 sta_id);
133 break; 144 break;
134 case ADD_STA_MODIFY_NON_EXIST_STA: 145 case ADD_STA_MODIFY_NON_EXIST_STA:
135 IWL_ERR(priv, "Attempting to modify non-existing station %d \n", 146 IWL_ERR(priv, "Attempting to modify non-existing station %d\n",
136 sta_id); 147 sta_id);
137 break; 148 break;
138 default: 149 default:
@@ -158,13 +169,6 @@ static void iwl_process_add_sta_resp(struct iwl_priv *priv,
158 priv->stations[sta_id].sta.mode == 169 priv->stations[sta_id].sta.mode ==
159 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added", 170 STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
160 addsta->sta.addr); 171 addsta->sta.addr);
161
162 /*
163 * Determine if we wanted to modify or add a station,
164 * if adding a station succeeded we have some more initialization
165 * to do when using station notification. TODO
166 */
167
168 spin_unlock_irqrestore(&priv->sta_lock, flags); 172 spin_unlock_irqrestore(&priv->sta_lock, flags);
169} 173}
170 174
@@ -190,6 +194,10 @@ int iwl_send_add_sta(struct iwl_priv *priv,
190 .flags = flags, 194 .flags = flags,
191 .data = data, 195 .data = data,
192 }; 196 };
197 u8 sta_id __maybe_unused = sta->sta.sta_id;
198
199 IWL_DEBUG_INFO(priv, "Adding sta %u (%pM) %ssynchronously\n",
200 sta_id, sta->sta.addr, flags & CMD_ASYNC ? "a" : "");
193 201
194 if (flags & CMD_ASYNC) 202 if (flags & CMD_ASYNC)
195 cmd.callback = iwl_add_sta_callback; 203 cmd.callback = iwl_add_sta_callback;
@@ -263,18 +271,19 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
263} 271}
264 272
265/** 273/**
266 * iwl_add_station - Add station to tables in driver and device 274 * iwl_prep_station - Prepare station information for addition
275 *
276 * should be called with sta_lock held
267 */ 277 */
268u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags, 278static u8 iwl_prep_station(struct iwl_priv *priv, const u8 *addr,
269 struct ieee80211_sta_ht_cap *ht_info) 279 bool is_ap,
280 struct ieee80211_sta_ht_cap *ht_info)
270{ 281{
271 struct iwl_station_entry *station; 282 struct iwl_station_entry *station;
272 unsigned long flags_spin;
273 int i; 283 int i;
274 int sta_id = IWL_INVALID_STATION; 284 u8 sta_id = IWL_INVALID_STATION;
275 u16 rate; 285 u16 rate;
276 286
277 spin_lock_irqsave(&priv->sta_lock, flags_spin);
278 if (is_ap) 287 if (is_ap)
279 sta_id = IWL_AP_ID; 288 sta_id = IWL_AP_ID;
280 else if (is_broadcast_ether_addr(addr)) 289 else if (is_broadcast_ether_addr(addr))
@@ -292,20 +301,32 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
292 sta_id = i; 301 sta_id = i;
293 } 302 }
294 303
295 /* These two conditions have the same outcome, but keep them separate 304 /*
296 since they have different meanings */ 305 * These two conditions have the same outcome, but keep them
297 if (unlikely(sta_id == IWL_INVALID_STATION)) { 306 * separate
298 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 307 */
308 if (unlikely(sta_id == IWL_INVALID_STATION))
309 return sta_id;
310
311 /*
312 * uCode is not able to deal with multiple requests to add a
313 * station. Keep track if one is in progress so that we do not send
314 * another.
315 */
316 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
317		IWL_DEBUG_INFO(priv, "STA %d already in the process of being added.\n",
318 sta_id);
299 return sta_id; 319 return sta_id;
300 } 320 }
301 321
302 if (priv->stations[sta_id].used && 322 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
323 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) &&
303 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) { 324 !compare_ether_addr(priv->stations[sta_id].sta.sta.addr, addr)) {
304 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 325 IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
326 sta_id, addr);
305 return sta_id; 327 return sta_id;
306 } 328 }
307 329
308
309 station = &priv->stations[sta_id]; 330 station = &priv->stations[sta_id];
310 station->used = IWL_STA_DRIVER_ACTIVE; 331 station->used = IWL_STA_DRIVER_ACTIVE;
311 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n", 332 IWL_DEBUG_ASSOC(priv, "Add STA to driver ID %d: %pM\n",
@@ -330,86 +351,188 @@ u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags,
330 /* Turn on both antennas for the station... */ 351 /* Turn on both antennas for the station... */
331 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK); 352 station->sta.rate_n_flags = cpu_to_le16(rate | RATE_MCS_ANT_AB_MSK);
332 353
354 return sta_id;
355
356}
357
358#define STA_WAIT_TIMEOUT (HZ/2)
359
360/**
361 * iwl_add_station_common - add a station to the driver and uCode station tables
362 */
363int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
364 bool is_ap,
365 struct ieee80211_sta_ht_cap *ht_info,
366 u8 *sta_id_r)
367{
368 struct iwl_station_entry *station;
369 unsigned long flags_spin;
370 int ret = 0;
371 u8 sta_id;
372
373 *sta_id_r = 0;
374 spin_lock_irqsave(&priv->sta_lock, flags_spin);
375 sta_id = iwl_prep_station(priv, addr, is_ap, ht_info);
376 if (sta_id == IWL_INVALID_STATION) {
377 IWL_ERR(priv, "Unable to prepare station %pM for addition\n",
378 addr);
379 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
380 return -EINVAL;
381 }
382
383 /*
384 * uCode is not able to deal with multiple requests to add a
385 * station. Keep track if one is in progress so that we do not send
386 * another.
387 */
388 if (priv->stations[sta_id].used & IWL_STA_UCODE_INPROGRESS) {
389		IWL_DEBUG_INFO(priv, "STA %d already in the process of being added.\n",
390 sta_id);
391 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
392 return -EEXIST;
393 }
394
395 if ((priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE) &&
396 (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
397 IWL_DEBUG_ASSOC(priv, "STA %d (%pM) already added, not adding again.\n",
398 sta_id, addr);
399 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
400 return -EEXIST;
401 }
402
403 priv->stations[sta_id].used |= IWL_STA_UCODE_INPROGRESS;
404 station = &priv->stations[sta_id];
333 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 405 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
334 406
335 /* Add station to device's station table */ 407 /* Add station to device's station table */
336 iwl_send_add_sta(priv, &station->sta, flags); 408 ret = iwl_send_add_sta(priv, &station->sta, CMD_SYNC);
337 return sta_id; 409 if (ret) {
338 410 IWL_ERR(priv, "Adding station %pM failed.\n", station->sta.sta.addr);
411 spin_lock_irqsave(&priv->sta_lock, flags_spin);
412 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
413 priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
414 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
415 }
416 *sta_id_r = sta_id;
417 return ret;
339} 418}
340EXPORT_SYMBOL(iwl_add_station); 419EXPORT_SYMBOL(iwl_add_station_common);
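Condensing the add path above: the 'used' flags walk a fixed sequence, and the failure branch rolls both driver-side bits back. A sketch under the same illustrative flag values as the sketch after iwl_find_station above:

    #define STA_DRIVER_ACTIVE     (1 << 0)   /* illustrative bit values */
    #define STA_UCODE_INPROGRESS  (1 << 2)

    /* Mirrors the synchronous add above: mark the entry in-flight, then
     * roll back both flags if iwl_send_add_sta() fails. */
    static unsigned int sta_add_flags(unsigned int used, int send_err)
    {
        used |= STA_DRIVER_ACTIVE | STA_UCODE_INPROGRESS;
        if (send_err)
            used &= ~(STA_DRIVER_ACTIVE | STA_UCODE_INPROGRESS);
        return used;
    }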
341 420
342static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const u8 *addr) 421static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap)
343{ 422{
344 unsigned long flags; 423 int i, r;
345 u8 sta_id = iwl_find_station(priv, addr); 424 struct iwl_link_quality_cmd link_cmd = {
425 .reserved1 = 0,
426 };
427 u32 rate_flags;
428 int ret = 0;
346 429
347 BUG_ON(sta_id == IWL_INVALID_STATION); 430 /* Set up the rate scaling to start at selected rate, fall back
431 * all the way down to 1M in IEEE order, and then spin on 1M */
432 if (is_ap)
433 r = IWL_RATE_54M_INDEX;
434 else if (priv->band == IEEE80211_BAND_5GHZ)
435 r = IWL_RATE_6M_INDEX;
436 else
437 r = IWL_RATE_1M_INDEX;
348 438
349 IWL_DEBUG_ASSOC(priv, "Removed STA from Ucode: %pM\n", addr); 439 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
440 rate_flags = 0;
441 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
442 rate_flags |= RATE_MCS_CCK_MSK;
350 443
351 spin_lock_irqsave(&priv->sta_lock, flags); 444 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
445 RATE_MCS_ANT_POS;
352 446
353 /* Ucode must be active and driver must be non active */ 447 link_cmd.rs_table[i].rate_n_flags =
354 if (priv->stations[sta_id].used != IWL_STA_UCODE_ACTIVE) 448 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
355 IWL_ERR(priv, "removed non active STA %d\n", sta_id); 449 r = iwl_get_prev_ieee_rate(r);
450 }
356 451
357 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE; 452 link_cmd.general_params.single_stream_ant_msk =
453 first_antenna(priv->hw_params.valid_tx_ant);
454 link_cmd.general_params.dual_stream_ant_msk = 3;
455 link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
456 link_cmd.agg_params.agg_time_limit =
457 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
358 458
359 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry)); 459 /* Update the rate scaling for control frame Tx to AP */
360 spin_unlock_irqrestore(&priv->sta_lock, flags); 460 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
461
462 ret = iwl_send_cmd_pdu(priv, REPLY_TX_LINK_QUALITY_CMD,
463 sizeof(link_cmd), &link_cmd);
464 if (ret)
465 IWL_ERR(priv, "REPLY_TX_LINK_QUALITY_CMD failed (%d)\n", ret);
361} 466}
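The retry-table fill above steps the rate down once per slot and then sits on the lowest rate. A simplified standalone sketch (MAX_RETRY and get_prev_rate() are stand-ins for LINK_QUAL_MAX_RETRY_NUM and iwl_get_prev_ieee_rate()):

    #define MAX_RETRY 16   /* stand-in for LINK_QUAL_MAX_RETRY_NUM */

    static int get_prev_rate(int r)
    {
        return r > 0 ? r - 1 : 0;   /* real code follows IEEE rate order */
    }

    /* Fill one rate index per retry slot, ending up pinned at index 0 (1M) */
    static void fill_retry_table(int table[MAX_RETRY], int r)
    {
        for (int i = 0; i < MAX_RETRY; i++) {
            table[i] = r;
            r = get_prev_rate(r);
        }
    }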
362 467
363static void iwl_remove_sta_callback(struct iwl_priv *priv, 468/*
364				    struct iwl_device_cmd *cmd, 469 * iwl_add_local_station - Add a station not requested by mac80211
365 struct iwl_rx_packet *pkt) 470 *
471 * This will be either the broadcast station or the bssid station needed by
472 * ad-hoc.
473 *
474 * Function sleeps.
475 */
476int iwl_add_local_station(struct iwl_priv *priv, const u8 *addr, bool init_rs)
366{ 477{
367 struct iwl_rem_sta_cmd *rm_sta = 478 int ret;
368 (struct iwl_rem_sta_cmd *)cmd->cmd.payload; 479 u8 sta_id;
369 const u8 *addr = rm_sta->addr;
370 480
371 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 481 ret = iwl_add_station_common(priv, addr, 0, NULL, &sta_id);
372 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 482 if (ret) {
373 pkt->hdr.flags); 483 IWL_ERR(priv, "Unable to add station %pM\n", addr);
374 return; 484 return ret;
375 } 485 }
376 486
377 switch (pkt->u.rem_sta.status) { 487 if (init_rs)
378 case REM_STA_SUCCESS_MSK: 488 /* Set up default rate scaling table in device's station table */
379 iwl_sta_ucode_deactivate(priv, addr); 489 iwl_sta_init_lq(priv, addr, false);
380 break; 490 return 0;
381 default:
382 IWL_ERR(priv, "REPLY_REMOVE_STA failed\n");
383 break;
384 }
385} 491}
492EXPORT_SYMBOL(iwl_add_local_station);
386 493
387static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr, 494/**
388 u8 flags) 495 * iwl_sta_ucode_deactivate - deactivate ucode status for a station
496 *
497 * priv->sta_lock must be held
498 */
499static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
500{
501 /* Ucode must be active and driver must be non active */
502 if (priv->stations[sta_id].used != IWL_STA_UCODE_ACTIVE)
503 IWL_ERR(priv, "removed non active STA %u\n", sta_id);
504
505 priv->stations[sta_id].used &= ~IWL_STA_UCODE_ACTIVE;
506
507 memset(&priv->stations[sta_id], 0, sizeof(struct iwl_station_entry));
508 IWL_DEBUG_ASSOC(priv, "Removed STA %u\n", sta_id);
509}
510
511static int iwl_send_remove_station(struct iwl_priv *priv,
512 struct iwl_station_entry *station)
389{ 513{
390 struct iwl_rx_packet *pkt; 514 struct iwl_rx_packet *pkt;
391 int ret; 515 int ret;
392 516
517 unsigned long flags_spin;
393 struct iwl_rem_sta_cmd rm_sta_cmd; 518 struct iwl_rem_sta_cmd rm_sta_cmd;
394 519
395 struct iwl_host_cmd cmd = { 520 struct iwl_host_cmd cmd = {
396 .id = REPLY_REMOVE_STA, 521 .id = REPLY_REMOVE_STA,
397 .len = sizeof(struct iwl_rem_sta_cmd), 522 .len = sizeof(struct iwl_rem_sta_cmd),
398 .flags = flags, 523 .flags = CMD_SYNC,
399 .data = &rm_sta_cmd, 524 .data = &rm_sta_cmd,
400 }; 525 };
401 526
402 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd)); 527 memset(&rm_sta_cmd, 0, sizeof(rm_sta_cmd));
403 rm_sta_cmd.num_sta = 1; 528 rm_sta_cmd.num_sta = 1;
404 memcpy(&rm_sta_cmd.addr, addr , ETH_ALEN); 529 memcpy(&rm_sta_cmd.addr, &station->sta.sta.addr , ETH_ALEN);
530
531 cmd.flags |= CMD_WANT_SKB;
405 532
406 if (flags & CMD_ASYNC)
407 cmd.callback = iwl_remove_sta_callback;
408 else
409 cmd.flags |= CMD_WANT_SKB;
410 ret = iwl_send_cmd(priv, &cmd); 533 ret = iwl_send_cmd(priv, &cmd);
411 534
412 if (ret || (flags & CMD_ASYNC)) 535 if (ret)
413 return ret; 536 return ret;
414 537
415 pkt = (struct iwl_rx_packet *)cmd.reply_page; 538 pkt = (struct iwl_rx_packet *)cmd.reply_page;
@@ -422,7 +545,9 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
422 if (!ret) { 545 if (!ret) {
423 switch (pkt->u.rem_sta.status) { 546 switch (pkt->u.rem_sta.status) {
424 case REM_STA_SUCCESS_MSK: 547 case REM_STA_SUCCESS_MSK:
425 iwl_sta_ucode_deactivate(priv, addr); 548 spin_lock_irqsave(&priv->sta_lock, flags_spin);
549 iwl_sta_ucode_deactivate(priv, station->sta.sta.sta_id);
550 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
426 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 551 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
427 break; 552 break;
428 default: 553 default:
@@ -439,23 +564,35 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
439/** 564/**
440 * iwl_remove_station - Remove driver's knowledge of station. 565 * iwl_remove_station - Remove driver's knowledge of station.
441 */ 566 */
442int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap) 567static int iwl_remove_station(struct iwl_priv *priv, struct ieee80211_sta *sta)
443{ 568{
444 int sta_id = IWL_INVALID_STATION; 569 int sta_id = IWL_INVALID_STATION;
445 int i, ret = -EINVAL; 570 int i, ret = -EINVAL;
446 unsigned long flags; 571 unsigned long flags;
 572 bool is_ap = priv->iw_mode == NL80211_IFTYPE_STATION; /* in STA mode the entry being removed is our AP */
573 struct iwl_station_entry *station;
574
575 if (!iwl_is_ready(priv)) {
576 IWL_DEBUG_INFO(priv,
577 "Unable to remove station %pM, device not ready.\n",
578 sta->addr);
579 /*
580 * It is typical for stations to be removed when we are
581 * going down. Return success since device will be down
 582 * soon anyway.
583 */
584 return 0;
585 }
447 586
448 spin_lock_irqsave(&priv->sta_lock, flags); 587 spin_lock_irqsave(&priv->sta_lock, flags);
449 588
450 if (is_ap) 589 if (is_ap)
451 sta_id = IWL_AP_ID; 590 sta_id = IWL_AP_ID;
452 else if (is_broadcast_ether_addr(addr))
453 sta_id = priv->hw_params.bcast_sta_id;
454 else 591 else
455 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++) 592 for (i = IWL_STA_ID; i < priv->hw_params.max_stations; i++)
456 if (priv->stations[i].used && 593 if (priv->stations[i].used &&
457 !compare_ether_addr(priv->stations[i].sta.sta.addr, 594 !compare_ether_addr(priv->stations[i].sta.sta.addr,
458 addr)) { 595 sta->addr)) {
459 sta_id = i; 596 sta_id = i;
460 break; 597 break;
461 } 598 }
@@ -464,17 +601,17 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
464 goto out; 601 goto out;
465 602
466 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n", 603 IWL_DEBUG_ASSOC(priv, "Removing STA from driver:%d %pM\n",
467 sta_id, addr); 604 sta_id, sta->addr);
468 605
469 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) { 606 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
470 IWL_ERR(priv, "Removing %pM but non DRIVER active\n", 607 IWL_DEBUG_INFO(priv, "Removing %pM but non DRIVER active\n",
471 addr); 608 sta->addr);
472 goto out; 609 goto out;
473 } 610 }
474 611
475 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) { 612 if (!(priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE)) {
476 IWL_ERR(priv, "Removing %pM but non UCODE active\n", 613 IWL_DEBUG_INFO(priv, "Removing %pM but non UCODE active\n",
477 addr); 614 sta->addr);
478 goto out; 615 goto out;
479 } 616 }
480 617
@@ -485,9 +622,10 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
485 622
486 BUG_ON(priv->num_stations < 0); 623 BUG_ON(priv->num_stations < 0);
487 624
625 station = &priv->stations[sta_id];
488 spin_unlock_irqrestore(&priv->sta_lock, flags); 626 spin_unlock_irqrestore(&priv->sta_lock, flags);
489 627
490 ret = iwl_send_remove_station(priv, addr, CMD_ASYNC); 628 ret = iwl_send_remove_station(priv, station);
491 return ret; 629 return ret;
492out: 630out:
493 spin_unlock_irqrestore(&priv->sta_lock, flags); 631 spin_unlock_irqrestore(&priv->sta_lock, flags);
@@ -495,37 +633,122 @@ out:
495} 633}
496 634
497/** 635/**
 498 * iwl_clear_stations_table - Clear the driver's station table 636 * iwl_clear_ucode_stations() - clear ucode station state in driver and, optionally, in device
 499 * 637 * @priv: iwl priv data
500 * NOTE: This does not clear or otherwise alter the device's station table. 638 * @force: If set then the uCode station table needs to be cleared here. If
639 * not set then the uCode station table has already been cleared,
640 * for example after sending it a RXON command without ASSOC bit
641 * set, and we just need to change driver state here.
501 */ 642 */
502void iwl_clear_stations_table(struct iwl_priv *priv) 643void iwl_clear_ucode_stations(struct iwl_priv *priv, bool force)
503{ 644{
504 unsigned long flags;
505 int i; 645 int i;
646 unsigned long flags_spin;
647 bool cleared = false;
648
649 IWL_DEBUG_INFO(priv, "Clearing ucode stations in driver%s\n",
650 force ? " and ucode" : "");
651
652 if (force) {
653 if (!iwl_is_ready(priv)) {
654 /*
655 * If device is not ready at this point the station
656 * table is likely already empty (uCode not ready
657 * to receive station requests) or will soon be
658 * due to interface going down.
659 */
660 IWL_DEBUG_INFO(priv, "Unable to remove stations from device - device not ready\n");
661 } else {
662 iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL);
663 }
664 }
506 665
507 spin_lock_irqsave(&priv->sta_lock, flags); 666 spin_lock_irqsave(&priv->sta_lock, flags_spin);
667 if (force) {
668 IWL_DEBUG_INFO(priv, "Clearing all station information in driver\n");
669 priv->num_stations = 0;
670 memset(priv->stations, 0, sizeof(priv->stations));
671 } else {
672 for (i = 0; i < priv->hw_params.max_stations; i++) {
673 if (priv->stations[i].used & IWL_STA_UCODE_ACTIVE) {
674 IWL_DEBUG_INFO(priv, "Clearing ucode active for station %d\n", i);
675 priv->stations[i].used &= ~IWL_STA_UCODE_ACTIVE;
676 cleared = true;
677 }
678 }
679 }
680 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
508 681
509 if (iwl_is_alive(priv) && 682 if (!cleared)
510 !test_bit(STATUS_EXIT_PENDING, &priv->status) && 683 IWL_DEBUG_INFO(priv, "No active stations found to be cleared\n");
511 iwl_send_cmd_pdu_async(priv, REPLY_REMOVE_ALL_STA, 0, NULL, NULL)) 684}
512 IWL_ERR(priv, "Couldn't clear the station table\n"); 685EXPORT_SYMBOL(iwl_clear_ucode_stations);
513 686
514 priv->num_stations = 0; 687/**
515 memset(priv->stations, 0, sizeof(priv->stations)); 688 * iwl_restore_stations() - Restore driver known stations to device
689 *
 690 * All stations considered active by the driver, but not present in
 691 * uCode, are restored.
692 *
693 * Function sleeps.
694 */
695void iwl_restore_stations(struct iwl_priv *priv)
696{
697 struct iwl_station_entry *station;
698 unsigned long flags_spin;
699 int i;
700 bool found = false;
701 int ret;
516 702
517 /* clean ucode key table bit map */ 703 if (!iwl_is_ready(priv)) {
518 priv->ucode_key_table = 0; 704 IWL_DEBUG_INFO(priv, "Not ready yet, not restoring any stations.\n");
705 return;
706 }
519 707
520 /* keep track of static keys */ 708 IWL_DEBUG_ASSOC(priv, "Restoring all known stations ... start.\n");
521 for (i = 0; i < WEP_KEYS_MAX ; i++) { 709 spin_lock_irqsave(&priv->sta_lock, flags_spin);
522 if (priv->wep_keys[i].key_size) 710 for (i = 0; i < priv->hw_params.max_stations; i++) {
523 set_bit(i, &priv->ucode_key_table); 711 if ((priv->stations[i].used & IWL_STA_DRIVER_ACTIVE) &&
712 !(priv->stations[i].used & IWL_STA_UCODE_ACTIVE)) {
713 IWL_DEBUG_ASSOC(priv, "Restoring sta %pM\n",
714 priv->stations[i].sta.sta.addr);
715 priv->stations[i].sta.mode = 0;
716 priv->stations[i].used |= IWL_STA_UCODE_INPROGRESS;
717 found = true;
718 }
524 } 719 }
525 720
526 spin_unlock_irqrestore(&priv->sta_lock, flags); 721 for (i = 0; i < priv->hw_params.max_stations; i++) {
722 if ((priv->stations[i].used & IWL_STA_UCODE_INPROGRESS)) {
723 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
724 station = &priv->stations[i];
725 ret = iwl_send_add_sta(priv, &priv->stations[i].sta, CMD_SYNC);
726 if (ret) {
727 IWL_ERR(priv, "Adding station %pM failed.\n",
728 station->sta.sta.addr);
729 spin_lock_irqsave(&priv->sta_lock, flags_spin);
730 priv->stations[i].used &= ~IWL_STA_DRIVER_ACTIVE;
731 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
732 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
733 }
734 /*
735 * Rate scaling has already been initialized, send
736 * current LQ command
737 */
738 if (station->lq)
739 iwl_send_lq_cmd(priv, station->lq, CMD_SYNC, true);
740 spin_lock_irqsave(&priv->sta_lock, flags_spin);
741 priv->stations[i].used &= ~IWL_STA_UCODE_INPROGRESS;
742 }
743 }
744
745 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
 746 if (!found)
 747 IWL_DEBUG_INFO(priv, "Restoring all known stations ... no stations to be restored.\n");
 748 else
 749 IWL_DEBUG_INFO(priv, "Restoring all known stations ... complete.\n");
527} 750}
528EXPORT_SYMBOL(iwl_clear_stations_table); 751EXPORT_SYMBOL(iwl_restore_stations);
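Sketch of the intended pairing with iwl_clear_ucode_stations(), per the kernel-doc above; it must run in sleepable context, since the restore path sends CMD_SYNC host commands (assumed flow, not a hunk from this patch):

	static void example_rxon_resync(struct iwl_priv *priv)
	{
		iwl_clear_ucode_stations(priv, false);	/* uCode table already wiped */
		iwl_restore_stations(priv);		/* re-add DRIVER_ACTIVE entries */
	}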
529 752
530int iwl_get_free_ucode_key_index(struct iwl_priv *priv) 753int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
531{ 754{
@@ -539,7 +762,7 @@ int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
539} 762}
540EXPORT_SYMBOL(iwl_get_free_ucode_key_index); 763EXPORT_SYMBOL(iwl_get_free_ucode_key_index);
541 764
542int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty) 765static int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
543{ 766{
544 int i, not_empty = 0; 767 int i, not_empty = 0;
545 u8 buff[sizeof(struct iwl_wep_cmd) + 768 u8 buff[sizeof(struct iwl_wep_cmd) +
@@ -549,9 +772,11 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
549 struct iwl_host_cmd cmd = { 772 struct iwl_host_cmd cmd = {
550 .id = REPLY_WEPKEY, 773 .id = REPLY_WEPKEY,
551 .data = wep_cmd, 774 .data = wep_cmd,
552 .flags = CMD_ASYNC, 775 .flags = CMD_SYNC,
553 }; 776 };
554 777
778 might_sleep();
779
555 memset(wep_cmd, 0, cmd_size + 780 memset(wep_cmd, 0, cmd_size +
556 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX)); 781 (sizeof(struct iwl_wep_key) * WEP_KEYS_MAX));
557 782
@@ -581,33 +806,34 @@ int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty)
581 else 806 else
582 return 0; 807 return 0;
583} 808}
584EXPORT_SYMBOL(iwl_send_static_wepkey_cmd); 809
810int iwl_restore_default_wep_keys(struct iwl_priv *priv)
811{
812 WARN_ON(!mutex_is_locked(&priv->mutex));
813
814 return iwl_send_static_wepkey_cmd(priv, 0);
815}
816EXPORT_SYMBOL(iwl_restore_default_wep_keys);
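The WEP helpers now assert priv->mutex instead of taking sta_lock, so callers hold the mutex around them; a minimal sketch, assuming a sleepable context (example_* is illustrative):

	static int example_restore_wep(struct iwl_priv *priv)
	{
		int ret;

		mutex_lock(&priv->mutex);
		ret = iwl_restore_default_wep_keys(priv);
		mutex_unlock(&priv->mutex);
		return ret;
	}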
585 817
586int iwl_remove_default_wep_key(struct iwl_priv *priv, 818int iwl_remove_default_wep_key(struct iwl_priv *priv,
587 struct ieee80211_key_conf *keyconf) 819 struct ieee80211_key_conf *keyconf)
588{ 820{
589 int ret; 821 int ret;
590 unsigned long flags;
591 822
592 spin_lock_irqsave(&priv->sta_lock, flags); 823 WARN_ON(!mutex_is_locked(&priv->mutex));
824
593 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n", 825 IWL_DEBUG_WEP(priv, "Removing default WEP key: idx=%d\n",
594 keyconf->keyidx); 826 keyconf->keyidx);
595 827
596 if (!test_and_clear_bit(keyconf->keyidx, &priv->ucode_key_table))
597 IWL_ERR(priv, "index %d not used in uCode key table.\n",
598 keyconf->keyidx);
599
600 priv->default_wep_key--;
601 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0])); 828 memset(&priv->wep_keys[keyconf->keyidx], 0, sizeof(priv->wep_keys[0]));
602 if (iwl_is_rfkill(priv)) { 829 if (iwl_is_rfkill(priv)) {
603 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n"); 830 IWL_DEBUG_WEP(priv, "Not sending REPLY_WEPKEY command due to RFKILL.\n");
604 spin_unlock_irqrestore(&priv->sta_lock, flags); 831 /* but keys in device are clear anyway so return success */
605 return 0; 832 return 0;
606 } 833 }
607 ret = iwl_send_static_wepkey_cmd(priv, 1); 834 ret = iwl_send_static_wepkey_cmd(priv, 1);
608 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n", 835 IWL_DEBUG_WEP(priv, "Remove default WEP key: idx=%d ret=%d\n",
609 keyconf->keyidx, ret); 836 keyconf->keyidx, ret);
610 spin_unlock_irqrestore(&priv->sta_lock, flags);
611 837
612 return ret; 838 return ret;
613} 839}
@@ -617,7 +843,8 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
617 struct ieee80211_key_conf *keyconf) 843 struct ieee80211_key_conf *keyconf)
618{ 844{
619 int ret; 845 int ret;
620 unsigned long flags; 846
847 WARN_ON(!mutex_is_locked(&priv->mutex));
621 848
622 if (keyconf->keylen != WEP_KEY_LEN_128 && 849 if (keyconf->keylen != WEP_KEY_LEN_128 &&
623 keyconf->keylen != WEP_KEY_LEN_64) { 850 keyconf->keylen != WEP_KEY_LEN_64) {
@@ -629,13 +856,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
629 keyconf->hw_key_idx = HW_KEY_DEFAULT; 856 keyconf->hw_key_idx = HW_KEY_DEFAULT;
630 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP; 857 priv->stations[IWL_AP_ID].keyinfo.alg = ALG_WEP;
631 858
632 spin_lock_irqsave(&priv->sta_lock, flags);
633 priv->default_wep_key++;
634
635 if (test_and_set_bit(keyconf->keyidx, &priv->ucode_key_table))
636 IWL_ERR(priv, "index %d already used in uCode key table.\n",
637 keyconf->keyidx);
638
639 priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen; 859 priv->wep_keys[keyconf->keyidx].key_size = keyconf->keylen;
640 memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key, 860 memcpy(&priv->wep_keys[keyconf->keyidx].key, &keyconf->key,
641 keyconf->keylen); 861 keyconf->keylen);
@@ -643,7 +863,6 @@ int iwl_set_default_wep_key(struct iwl_priv *priv,
643 ret = iwl_send_static_wepkey_cmd(priv, 0); 863 ret = iwl_send_static_wepkey_cmd(priv, 0);
644 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n", 864 IWL_DEBUG_WEP(priv, "Set default WEP key: len=%d idx=%d ret=%d\n",
645 keyconf->keylen, keyconf->keyidx, ret); 865 keyconf->keylen, keyconf->keyidx, ret);
646 spin_unlock_irqrestore(&priv->sta_lock, flags);
647 866
648 return ret; 867 return ret;
649} 868}
@@ -885,7 +1104,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
885 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK; 1104 priv->stations[sta_id].sta.mode = STA_CONTROL_MODIFY_MSK;
886 1105
887 if (iwl_is_rfkill(priv)) { 1106 if (iwl_is_rfkill(priv)) {
888 IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled. \n"); 1107 IWL_DEBUG_WEP(priv, "Not sending REPLY_ADD_STA command because RFKILL enabled.\n");
889 spin_unlock_irqrestore(&priv->sta_lock, flags); 1108 spin_unlock_irqrestore(&priv->sta_lock, flags);
890 return 0; 1109 return 0;
891 } 1110 }
@@ -948,9 +1167,22 @@ static inline void iwl_dump_lq_cmd(struct iwl_priv *priv,
948} 1167}
949#endif 1168#endif
950 1169
1170/**
1171 * iwl_send_lq_cmd() - Send link quality command
1172 * @init: This command is sent as part of station initialization right
1173 * after station has been added.
1174 *
1175 * The link quality command is sent as the last step of station creation.
 1176 * When init is set we are in that special case: once the command
 1177 * completes, the state indicating that station creation is still in
 1178 * progress is cleared.
1179 */
951int iwl_send_lq_cmd(struct iwl_priv *priv, 1180int iwl_send_lq_cmd(struct iwl_priv *priv,
952 struct iwl_link_quality_cmd *lq, u8 flags) 1181 struct iwl_link_quality_cmd *lq, u8 flags, bool init)
953{ 1182{
1183 int ret = 0;
1184 unsigned long flags_spin;
1185
954 struct iwl_host_cmd cmd = { 1186 struct iwl_host_cmd cmd = {
955 .id = REPLY_TX_LINK_QUALITY_CMD, 1187 .id = REPLY_TX_LINK_QUALITY_CMD,
956 .len = sizeof(struct iwl_link_quality_cmd), 1188 .len = sizeof(struct iwl_link_quality_cmd),
@@ -958,175 +1190,35 @@ int iwl_send_lq_cmd(struct iwl_priv *priv,
958 .data = lq, 1190 .data = lq,
959 }; 1191 };
960 1192
961 if ((lq->sta_id == 0xFF) && 1193 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
962 (priv->iw_mode == NL80211_IFTYPE_ADHOC))
963 return -EINVAL; 1194 return -EINVAL;
964 1195
965 if (lq->sta_id == 0xFF)
966 lq->sta_id = IWL_AP_ID;
967
968 iwl_dump_lq_cmd(priv, lq); 1196 iwl_dump_lq_cmd(priv, lq);
1197 BUG_ON(init && (cmd.flags & CMD_ASYNC));
969 1198
 970 if (iwl_is_associated(priv) && priv->assoc_station_added)
971 return iwl_send_cmd(priv, &cmd); 1200 ret = iwl_send_cmd(priv, &cmd);
1201 if (ret || (cmd.flags & CMD_ASYNC))
1202 return ret;
972 1203
1204 if (init) {
1205 IWL_DEBUG_INFO(priv, "init LQ command complete, clearing sta addition status for sta %d\n",
1206 lq->sta_id);
1207 spin_lock_irqsave(&priv->sta_lock, flags_spin);
1208 priv->stations[lq->sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
1209 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
1210 }
973 return 0; 1211 return 0;
974} 1212}
975EXPORT_SYMBOL(iwl_send_lq_cmd); 1213EXPORT_SYMBOL(iwl_send_lq_cmd);
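Hedged sketch of the init-time use described in the kernel-doc above: the initial LQ command must be synchronous (see the BUG_ON), and on success the helper itself clears IWL_STA_UCODE_INPROGRESS for lq->sta_id, so the caller does not (example_* is illustrative):

	static int example_finish_station(struct iwl_priv *priv,
					  struct iwl_link_quality_cmd *lq)
	{
		/* CMD_SYNC is mandatory when init == true */
		return iwl_send_lq_cmd(priv, lq, CMD_SYNC, true);
	}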
976 1214
977/** 1215/**
978 * iwl_sta_init_lq - Initialize a station's hardware rate table
979 *
980 * The uCode's station table contains a table of fallback rates
981 * for automatic fallback during transmission.
982 *
983 * NOTE: This sets up a default set of values. These will be replaced later
984 * if the driver's iwl-agn-rs rate scaling algorithm is used, instead of
985 * rc80211_simple.
986 *
987 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
988 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
989 * which requires station table entry to exist).
990 */
991static void iwl_sta_init_lq(struct iwl_priv *priv, const u8 *addr, bool is_ap)
992{
993 int i, r;
994 struct iwl_link_quality_cmd link_cmd = {
995 .reserved1 = 0,
996 };
997 u32 rate_flags;
998
999 /* Set up the rate scaling to start at selected rate, fall back
1000 * all the way down to 1M in IEEE order, and then spin on 1M */
1001 if (is_ap)
1002 r = IWL_RATE_54M_INDEX;
1003 else if (priv->band == IEEE80211_BAND_5GHZ)
1004 r = IWL_RATE_6M_INDEX;
1005 else
1006 r = IWL_RATE_1M_INDEX;
1007
1008 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
1009 rate_flags = 0;
1010 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
1011 rate_flags |= RATE_MCS_CCK_MSK;
1012
1013 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
1014 RATE_MCS_ANT_POS;
1015
1016 link_cmd.rs_table[i].rate_n_flags =
1017 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
1018 r = iwl_get_prev_ieee_rate(r);
1019 }
1020
1021 link_cmd.general_params.single_stream_ant_msk =
1022 first_antenna(priv->hw_params.valid_tx_ant);
1023 link_cmd.general_params.dual_stream_ant_msk = 3;
1024 link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
1025 link_cmd.agg_params.agg_time_limit =
1026 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
1027
1028 /* Update the rate scaling for control frame Tx to AP */
1029 link_cmd.sta_id = is_ap ? IWL_AP_ID : priv->hw_params.bcast_sta_id;
1030
1031 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
1032 sizeof(link_cmd), &link_cmd, NULL);
1033}
1034
1035/**
1036 * iwl_rxon_add_station - add station into station table.
1037 *
1038 * there is only one AP station with id= IWL_AP_ID
1039 * NOTE: mutex must be held before calling this function
1040 */
1041int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
1042{
1043 struct ieee80211_sta *sta;
1044 struct ieee80211_sta_ht_cap ht_config;
1045 struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
1046 u8 sta_id;
1047
1048 /*
1049 * Set HT capabilities. It is ok to set this struct even if not using
1050 * HT config: the priv->current_ht_config.is_ht flag will just be false
1051 */
1052 rcu_read_lock();
1053 sta = ieee80211_find_sta(priv->vif, addr);
1054 if (sta) {
1055 memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
1056 cur_ht_config = &ht_config;
1057 }
1058 rcu_read_unlock();
1059
1060 /* Add station to device's station table */
1061 sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
1062
1063 /* Set up default rate scaling table in device's station table */
1064 iwl_sta_init_lq(priv, addr, is_ap);
1065
1066 return sta_id;
1067}
1068EXPORT_SYMBOL(iwl_rxon_add_station);
1069
1070/**
1071 * iwl_sta_init_bcast_lq - Initialize a bcast station's hardware rate table
1072 *
1073 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
1074 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
1075 * which requires station table entry to exist).
1076 */
1077static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
1078{
1079 int i, r;
1080 struct iwl_link_quality_cmd link_cmd = {
1081 .reserved1 = 0,
1082 };
1083 u32 rate_flags;
1084
1085 /* Set up the rate scaling to start at selected rate, fall back
1086 * all the way down to 1M in IEEE order, and then spin on 1M */
1087 if (priv->band == IEEE80211_BAND_5GHZ)
1088 r = IWL_RATE_6M_INDEX;
1089 else
1090 r = IWL_RATE_1M_INDEX;
1091
1092 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
1093 rate_flags = 0;
1094 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
1095 rate_flags |= RATE_MCS_CCK_MSK;
1096
1097 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
1098 RATE_MCS_ANT_POS;
1099
1100 link_cmd.rs_table[i].rate_n_flags =
1101 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
1102 r = iwl_get_prev_ieee_rate(r);
1103 }
1104
1105 link_cmd.general_params.single_stream_ant_msk =
1106 first_antenna(priv->hw_params.valid_tx_ant);
1107 link_cmd.general_params.dual_stream_ant_msk = 3;
1108 link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
1109 link_cmd.agg_params.agg_time_limit =
1110 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
1111
1112 /* Update the rate scaling for control frame Tx to AP */
1113 link_cmd.sta_id = priv->hw_params.bcast_sta_id;
1114
1115 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
1116 sizeof(link_cmd), &link_cmd, NULL);
1117}
1118
1119
1120/**
1121 * iwl_add_bcast_station - add broadcast station into station table. 1216 * iwl_add_bcast_station - add broadcast station into station table.
1122 */ 1217 */
1123void iwl_add_bcast_station(struct iwl_priv *priv) 1218void iwl_add_bcast_station(struct iwl_priv *priv)
1124{ 1219{
1125 IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n"); 1220 IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
1126 iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL); 1221 iwl_add_local_station(priv, iwl_bcast_addr, true);
1127
1128 /* Set up default rate scaling table in device's station table */
1129 iwl_sta_init_bcast_lq(priv);
1130} 1222}
1131EXPORT_SYMBOL(iwl_add_bcast_station); 1223EXPORT_SYMBOL(iwl_add_bcast_station);
1132 1224
@@ -1136,7 +1228,14 @@ EXPORT_SYMBOL(iwl_add_bcast_station);
1136void iwl3945_add_bcast_station(struct iwl_priv *priv) 1228void iwl3945_add_bcast_station(struct iwl_priv *priv)
1137{ 1229{
1138 IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n"); 1230 IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
1139 iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL); 1231 iwl_add_local_station(priv, iwl_bcast_addr, false);
1232 /*
 1233 * It is assumed that when a station is added, more initialization
 1234 * still needs to be done; for 3945 that is not the case, so we can
 1235 * clear the in-progress marker right here.
1236 */
1237 priv->stations[priv->hw_params.bcast_sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
1238
1140} 1239}
1141EXPORT_SYMBOL(iwl3945_add_bcast_station); 1240EXPORT_SYMBOL(iwl3945_add_bcast_station);
1142 1241
@@ -1159,6 +1258,13 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1159 /* If we are a client station in a BSS network, use the special 1258 /* If we are a client station in a BSS network, use the special
1160 * AP station entry (that's the only station we communicate with) */ 1259 * AP station entry (that's the only station we communicate with) */
1161 case NL80211_IFTYPE_STATION: 1260 case NL80211_IFTYPE_STATION:
1261 /*
 1262 * If the station's addition is not complete yet, which means
 1263 * that rate scaling has not been initialized, return the
 1264 * broadcast station instead.
1265 */
1266 if (!(priv->stations[IWL_AP_ID].used & IWL_STA_UCODE_ACTIVE))
1267 return priv->hw_params.bcast_sta_id;
1162 return IWL_AP_ID; 1268 return IWL_AP_ID;
1163 1269
1164 /* If we are an AP, then find the station, or use BCAST */ 1270 /* If we are an AP, then find the station, or use BCAST */
@@ -1175,13 +1281,6 @@ int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
1175 if (sta_id != IWL_INVALID_STATION) 1281 if (sta_id != IWL_INVALID_STATION)
1176 return sta_id; 1282 return sta_id;
1177 1283
1178 /* Create new station table entry */
1179 sta_id = iwl_add_station(priv, hdr->addr1, false,
1180 CMD_ASYNC, NULL);
1181
1182 if (sta_id != IWL_INVALID_STATION)
1183 return sta_id;
1184
1185 IWL_DEBUG_DROP(priv, "Station %pM not in station map. " 1284 IWL_DEBUG_DROP(priv, "Station %pM not in station map. "
1186 "Defaulting to broadcast...\n", 1285 "Defaulting to broadcast...\n",
1187 hdr->addr1); 1286 hdr->addr1);
@@ -1291,3 +1390,20 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
1291 1390
1292 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC); 1391 iwl_send_add_sta(priv, &priv->stations[sta_id].sta, CMD_ASYNC);
1293} 1392}
1393EXPORT_SYMBOL(iwl_sta_modify_sleep_tx_count);
1394
1395int iwl_mac_sta_remove(struct ieee80211_hw *hw,
1396 struct ieee80211_vif *vif,
1397 struct ieee80211_sta *sta)
1398{
1399 int ret;
1400 struct iwl_priv *priv = hw->priv;
1401 IWL_DEBUG_INFO(priv, "received request to remove station %pM\n",
1402 sta->addr);
1403 ret = iwl_remove_station(priv, sta);
1404 if (ret)
1405 IWL_ERR(priv, "Error removing station %pM\n",
1406 sta->addr);
1407 return ret;
1408}
1409EXPORT_SYMBOL(iwl_mac_sta_remove);
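iwl_mac_sta_remove() matches the mac80211 sta_remove callback signature, so the driver ops table can point at it directly; a sketch, assuming the sta_remove hook present in mac80211 of this era (other members elided, example_* is illustrative):

	static const struct ieee80211_ops example_hw_ops = {
		/* ... other callbacks ... */
		.sta_remove = iwl_mac_sta_remove,
	};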
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 2dc35fe28f56..42cd2f4a01cd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -32,17 +32,23 @@
32#define HW_KEY_DYNAMIC 0 32#define HW_KEY_DYNAMIC 0
33#define HW_KEY_DEFAULT 1 33#define HW_KEY_DEFAULT 1
34 34
35#define IWL_STA_DRIVER_ACTIVE BIT(0) /* driver entry is active */
36#define IWL_STA_UCODE_ACTIVE BIT(1) /* ucode entry is active */
37#define IWL_STA_UCODE_INPROGRESS BIT(2) /* ucode entry is in process of
38 being activated */
39
40
35/** 41/**
36 * iwl_find_station - Find station id for a given BSSID 42 * iwl_find_station - Find station id for a given BSSID
37 * @bssid: MAC address of station ID to find 43 * @bssid: MAC address of station ID to find
38 */ 44 */
39u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid); 45u8 iwl_find_station(struct iwl_priv *priv, const u8 *bssid);
40 46
41int iwl_send_static_wepkey_cmd(struct iwl_priv *priv, u8 send_if_empty);
42int iwl_remove_default_wep_key(struct iwl_priv *priv, 47int iwl_remove_default_wep_key(struct iwl_priv *priv,
43 struct ieee80211_key_conf *key); 48 struct ieee80211_key_conf *key);
44int iwl_set_default_wep_key(struct iwl_priv *priv, 49int iwl_set_default_wep_key(struct iwl_priv *priv,
45 struct ieee80211_key_conf *key); 50 struct ieee80211_key_conf *key);
51int iwl_restore_default_wep_keys(struct iwl_priv *priv);
46int iwl_set_dynamic_key(struct iwl_priv *priv, 52int iwl_set_dynamic_key(struct iwl_priv *priv,
47 struct ieee80211_key_conf *key, u8 sta_id); 53 struct ieee80211_key_conf *key, u8 sta_id);
48int iwl_remove_dynamic_key(struct iwl_priv *priv, 54int iwl_remove_dynamic_key(struct iwl_priv *priv,
@@ -51,18 +57,22 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
51 struct ieee80211_key_conf *keyconf, 57 struct ieee80211_key_conf *keyconf,
52 const u8 *addr, u32 iv32, u16 *phase1key); 58 const u8 *addr, u32 iv32, u16 *phase1key);
53 59
54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
55void iwl_add_bcast_station(struct iwl_priv *priv); 60void iwl_add_bcast_station(struct iwl_priv *priv);
56void iwl3945_add_bcast_station(struct iwl_priv *priv); 61void iwl3945_add_bcast_station(struct iwl_priv *priv);
57int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); 62void iwl_restore_stations(struct iwl_priv *priv);
58void iwl_clear_stations_table(struct iwl_priv *priv); 63void iwl_clear_ucode_stations(struct iwl_priv *priv, bool force);
59int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 64int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
60int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr); 65int iwl_get_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
61int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr); 66int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr);
62int iwl_send_add_sta(struct iwl_priv *priv, 67int iwl_send_add_sta(struct iwl_priv *priv,
63 struct iwl_addsta_cmd *sta, u8 flags); 68 struct iwl_addsta_cmd *sta, u8 flags);
64u8 iwl_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap, u8 flags, 69int iwl_add_local_station(struct iwl_priv *priv, const u8 *addr, bool init_rs);
65 struct ieee80211_sta_ht_cap *ht_info); 70int iwl_add_station_common(struct iwl_priv *priv, const u8 *addr,
71 bool is_ap,
72 struct ieee80211_sta_ht_cap *ht_info,
73 u8 *sta_id_r);
74int iwl_mac_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
75 struct ieee80211_sta *sta);
66void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid); 76void iwl_sta_tx_modify_enable_tid(struct iwl_priv *priv, int sta_id, int tid);
67int iwl_sta_rx_agg_start(struct iwl_priv *priv, 77int iwl_sta_rx_agg_start(struct iwl_priv *priv,
68 const u8 *addr, int tid, u16 ssn); 78 const u8 *addr, int tid, u16 ssn);
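Illustrative lifecycle of the three IWL_STA_* bits added to iwl-sta.h above, mirroring the iwl-sta.c changes in this patch (sketch only, not tree code):

	static inline void example_sta_lifecycle(u8 *used)
	{
		*used |= IWL_STA_DRIVER_ACTIVE | IWL_STA_UCODE_INPROGRESS; /* add issued */
		*used |= IWL_STA_UCODE_ACTIVE;		/* uCode ACKed REPLY_ADD_STA */
		*used &= ~IWL_STA_UCODE_INPROGRESS;	/* initial LQ command done */
		*used &= ~IWL_STA_UCODE_ACTIVE;		/* RXON wiped the uCode table */
		/* entry stays DRIVER_ACTIVE; iwl_restore_stations() re-adds it */
	}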
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 8dd0c036d547..1ece2ea09773 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -38,47 +38,6 @@
38#include "iwl-io.h" 38#include "iwl-io.h"
39#include "iwl-helpers.h" 39#include "iwl-helpers.h"
40 40
41static const u16 default_tid_to_tx_fifo[] = {
42 IWL_TX_FIFO_AC1,
43 IWL_TX_FIFO_AC0,
44 IWL_TX_FIFO_AC0,
45 IWL_TX_FIFO_AC1,
46 IWL_TX_FIFO_AC2,
47 IWL_TX_FIFO_AC2,
48 IWL_TX_FIFO_AC3,
49 IWL_TX_FIFO_AC3,
50 IWL_TX_FIFO_NONE,
51 IWL_TX_FIFO_NONE,
52 IWL_TX_FIFO_NONE,
53 IWL_TX_FIFO_NONE,
54 IWL_TX_FIFO_NONE,
55 IWL_TX_FIFO_NONE,
56 IWL_TX_FIFO_NONE,
57 IWL_TX_FIFO_NONE,
58 IWL_TX_FIFO_AC3
59};
60
61static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
62 struct iwl_dma_ptr *ptr, size_t size)
63{
64 ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
65 GFP_KERNEL);
66 if (!ptr->addr)
67 return -ENOMEM;
68 ptr->size = size;
69 return 0;
70}
71
72static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
73 struct iwl_dma_ptr *ptr)
74{
75 if (unlikely(!ptr->addr))
76 return;
77
78 dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
79 memset(ptr, 0, sizeof(*ptr));
80}
81
82/** 41/**
83 * iwl_txq_update_write_ptr - Send new write index to hardware 42 * iwl_txq_update_write_ptr - Send new write index to hardware
84 */ 43 */
@@ -310,6 +269,8 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
310 q->high_mark = 2; 269 q->high_mark = 2;
311 270
312 q->write_ptr = q->read_ptr = 0; 271 q->write_ptr = q->read_ptr = 0;
272 q->last_read_ptr = 0;
273 q->repeat_same_read_ptr = 0;
313 274
314 return 0; 275 return 0;
315} 276}
@@ -454,611 +415,6 @@ void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
454} 415}
455EXPORT_SYMBOL(iwl_tx_queue_reset); 416EXPORT_SYMBOL(iwl_tx_queue_reset);
456 417
457/**
458 * iwl_hw_txq_ctx_free - Free TXQ Context
459 *
460 * Destroy all TX DMA queues and structures
461 */
462void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
463{
464 int txq_id;
465
466 /* Tx queues */
467 if (priv->txq) {
468 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
469 if (txq_id == IWL_CMD_QUEUE_NUM)
470 iwl_cmd_queue_free(priv);
471 else
472 iwl_tx_queue_free(priv, txq_id);
473 }
474 iwl_free_dma_ptr(priv, &priv->kw);
475
476 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
477
478 /* free tx queue structure */
479 iwl_free_txq_mem(priv);
480}
481EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
482
483/**
484 * iwl_txq_ctx_alloc - allocate TX queue context
485 * Allocate all Tx DMA structures and initialize them
486 *
487 * @param priv
488 * @return error code
489 */
490int iwl_txq_ctx_alloc(struct iwl_priv *priv)
491{
492 int ret;
493 int txq_id, slots_num;
494 unsigned long flags;
495
496 /* Free all tx/cmd queues and keep-warm buffer */
497 iwl_hw_txq_ctx_free(priv);
498
499 ret = iwl_alloc_dma_ptr(priv, &priv->scd_bc_tbls,
500 priv->hw_params.scd_bc_tbls_size);
501 if (ret) {
502 IWL_ERR(priv, "Scheduler BC Table allocation failed\n");
503 goto error_bc_tbls;
504 }
505 /* Alloc keep-warm buffer */
506 ret = iwl_alloc_dma_ptr(priv, &priv->kw, IWL_KW_SIZE);
507 if (ret) {
508 IWL_ERR(priv, "Keep Warm allocation failed\n");
509 goto error_kw;
510 }
511
512 /* allocate tx queue structure */
513 ret = iwl_alloc_txq_mem(priv);
514 if (ret)
515 goto error;
516
517 spin_lock_irqsave(&priv->lock, flags);
518
519 /* Turn off all Tx DMA fifos */
520 priv->cfg->ops->lib->txq_set_sched(priv, 0);
521
522 /* Tell NIC where to find the "keep warm" buffer */
523 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
524
525 spin_unlock_irqrestore(&priv->lock, flags);
526
527 /* Alloc and init all Tx queues, including the command queue (#4) */
528 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
529 slots_num = (txq_id == IWL_CMD_QUEUE_NUM) ?
530 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
531 ret = iwl_tx_queue_init(priv, &priv->txq[txq_id], slots_num,
532 txq_id);
533 if (ret) {
534 IWL_ERR(priv, "Tx %d queue init failed\n", txq_id);
535 goto error;
536 }
537 }
538
539 return ret;
540
541 error:
542 iwl_hw_txq_ctx_free(priv);
543 iwl_free_dma_ptr(priv, &priv->kw);
544 error_kw:
545 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
546 error_bc_tbls:
547 return ret;
548}
549
550void iwl_txq_ctx_reset(struct iwl_priv *priv)
551{
552 int txq_id, slots_num;
553 unsigned long flags;
554
555 spin_lock_irqsave(&priv->lock, flags);
556
557 /* Turn off all Tx DMA fifos */
558 priv->cfg->ops->lib->txq_set_sched(priv, 0);
559
560 /* Tell NIC where to find the "keep warm" buffer */
561 iwl_write_direct32(priv, FH_KW_MEM_ADDR_REG, priv->kw.dma >> 4);
562
563 spin_unlock_irqrestore(&priv->lock, flags);
564
565 /* Alloc and init all Tx queues, including the command queue (#4) */
566 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
567 slots_num = txq_id == IWL_CMD_QUEUE_NUM ?
568 TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
569 iwl_tx_queue_reset(priv, &priv->txq[txq_id], slots_num, txq_id);
570 }
571}
572
573/**
574 * iwl_txq_ctx_stop - Stop all Tx DMA channels
575 */
576void iwl_txq_ctx_stop(struct iwl_priv *priv)
577{
578 int ch;
579 unsigned long flags;
580
581 /* Turn off all Tx DMA fifos */
582 spin_lock_irqsave(&priv->lock, flags);
583
584 priv->cfg->ops->lib->txq_set_sched(priv, 0);
585
586 /* Stop each Tx DMA channel, and wait for it to be idle */
587 for (ch = 0; ch < priv->hw_params.dma_chnl_num; ch++) {
588 iwl_write_direct32(priv, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
589 iwl_poll_direct_bit(priv, FH_TSSR_TX_STATUS_REG,
590 FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch),
591 1000);
592 }
593 spin_unlock_irqrestore(&priv->lock, flags);
594}
595EXPORT_SYMBOL(iwl_txq_ctx_stop);
596
597/*
598 * handle build REPLY_TX command notification.
599 */
600static void iwl_tx_cmd_build_basic(struct iwl_priv *priv,
601 struct iwl_tx_cmd *tx_cmd,
602 struct ieee80211_tx_info *info,
603 struct ieee80211_hdr *hdr,
604 u8 std_id)
605{
606 __le16 fc = hdr->frame_control;
607 __le32 tx_flags = tx_cmd->tx_flags;
608
609 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
610 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
611 tx_flags |= TX_CMD_FLG_ACK_MSK;
612 if (ieee80211_is_mgmt(fc))
613 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
614 if (ieee80211_is_probe_resp(fc) &&
615 !(le16_to_cpu(hdr->seq_ctrl) & 0xf))
616 tx_flags |= TX_CMD_FLG_TSF_MSK;
617 } else {
618 tx_flags &= (~TX_CMD_FLG_ACK_MSK);
619 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
620 }
621
622 if (ieee80211_is_back_req(fc))
623 tx_flags |= TX_CMD_FLG_ACK_MSK | TX_CMD_FLG_IMM_BA_RSP_MASK;
624
625
626 tx_cmd->sta_id = std_id;
627 if (ieee80211_has_morefrags(fc))
628 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
629
630 if (ieee80211_is_data_qos(fc)) {
631 u8 *qc = ieee80211_get_qos_ctl(hdr);
632 tx_cmd->tid_tspec = qc[0] & 0xf;
633 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
634 } else {
635 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
636 }
637
638 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
639
640 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
641 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
642
643 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
644 if (ieee80211_is_mgmt(fc)) {
645 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
646 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
647 else
648 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
649 } else {
650 tx_cmd->timeout.pm_frame_timeout = 0;
651 }
652
653 tx_cmd->driver_txop = 0;
654 tx_cmd->tx_flags = tx_flags;
655 tx_cmd->next_frame_len = 0;
656}
657
658#define RTS_HCCA_RETRY_LIMIT 3
659#define RTS_DFAULT_RETRY_LIMIT 60
660
661static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
662 struct iwl_tx_cmd *tx_cmd,
663 struct ieee80211_tx_info *info,
664 __le16 fc, int is_hcca)
665{
666 u32 rate_flags;
667 int rate_idx;
668 u8 rts_retry_limit;
669 u8 data_retry_limit;
670 u8 rate_plcp;
671
672 /* Set retry limit on DATA packets and Probe Responses*/
673 if (ieee80211_is_probe_resp(fc))
674 data_retry_limit = 3;
675 else
676 data_retry_limit = IWL_DEFAULT_TX_RETRY;
677 tx_cmd->data_retry_limit = data_retry_limit;
678
679 /* Set retry limit on RTS packets */
680 rts_retry_limit = (is_hcca) ? RTS_HCCA_RETRY_LIMIT :
681 RTS_DFAULT_RETRY_LIMIT;
682 if (data_retry_limit < rts_retry_limit)
683 rts_retry_limit = data_retry_limit;
684 tx_cmd->rts_retry_limit = rts_retry_limit;
685
686 /* DATA packets will use the uCode station table for rate/antenna
687 * selection */
688 if (ieee80211_is_data(fc)) {
689 tx_cmd->initial_rate_index = 0;
690 tx_cmd->tx_flags |= TX_CMD_FLG_STA_RATE_MSK;
691 return;
692 }
693
694 /**
695 * If the current TX rate stored in mac80211 has the MCS bit set, it's
696 * not really a TX rate. Thus, we use the lowest supported rate for
697 * this band. Also use the lowest supported rate if the stored rate
698 * index is invalid.
699 */
700 rate_idx = info->control.rates[0].idx;
701 if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS ||
702 (rate_idx < 0) || (rate_idx > IWL_RATE_COUNT_LEGACY))
703 rate_idx = rate_lowest_index(&priv->bands[info->band],
704 info->control.sta);
705 /* For 5 GHZ band, remap mac80211 rate indices into driver indices */
706 if (info->band == IEEE80211_BAND_5GHZ)
707 rate_idx += IWL_FIRST_OFDM_RATE;
708 /* Get PLCP rate for tx_cmd->rate_n_flags */
709 rate_plcp = iwl_rates[rate_idx].plcp;
710 /* Zero out flags for this packet */
711 rate_flags = 0;
712
713 /* Set CCK flag as needed */
714 if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE))
715 rate_flags |= RATE_MCS_CCK_MSK;
716
717 /* Set up RTS and CTS flags for certain packets */
718 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
719 case cpu_to_le16(IEEE80211_STYPE_AUTH):
720 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
721 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
722 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
723 if (tx_cmd->tx_flags & TX_CMD_FLG_RTS_MSK) {
724 tx_cmd->tx_flags &= ~TX_CMD_FLG_RTS_MSK;
725 tx_cmd->tx_flags |= TX_CMD_FLG_CTS_MSK;
726 }
727 break;
728 default:
729 break;
730 }
731
732 /* Set up antennas */
733 priv->mgmt_tx_ant = iwl_toggle_tx_ant(priv, priv->mgmt_tx_ant);
734 rate_flags |= iwl_ant_idx_to_flags(priv->mgmt_tx_ant);
735
736 /* Set the rate in the TX cmd */
737 tx_cmd->rate_n_flags = iwl_hw_set_rate_n_flags(rate_plcp, rate_flags);
738}
739
740static void iwl_tx_cmd_build_hwcrypto(struct iwl_priv *priv,
741 struct ieee80211_tx_info *info,
742 struct iwl_tx_cmd *tx_cmd,
743 struct sk_buff *skb_frag,
744 int sta_id)
745{
746 struct ieee80211_key_conf *keyconf = info->control.hw_key;
747
748 switch (keyconf->alg) {
749 case ALG_CCMP:
750 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
751 memcpy(tx_cmd->key, keyconf->key, keyconf->keylen);
752 if (info->flags & IEEE80211_TX_CTL_AMPDU)
753 tx_cmd->tx_flags |= TX_CMD_FLG_AGG_CCMP_MSK;
754 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
755 break;
756
757 case ALG_TKIP:
758 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
759 ieee80211_get_tkip_key(keyconf, skb_frag,
760 IEEE80211_TKIP_P2_KEY, tx_cmd->key);
761 IWL_DEBUG_TX(priv, "tx_cmd with tkip hwcrypto\n");
762 break;
763
764 case ALG_WEP:
765 tx_cmd->sec_ctl |= (TX_CMD_SEC_WEP |
766 (keyconf->keyidx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT);
767
768 if (keyconf->keylen == WEP_KEY_LEN_128)
769 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
770
771 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
772
773 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
774 "with key %d\n", keyconf->keyidx);
775 break;
776
777 default:
778 IWL_ERR(priv, "Unknown encode alg %d\n", keyconf->alg);
779 break;
780 }
781}
782
783/*
784 * start REPLY_TX command process
785 */
786int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
787{
788 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
789 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
790 struct ieee80211_sta *sta = info->control.sta;
791 struct iwl_station_priv *sta_priv = NULL;
792 struct iwl_tx_queue *txq;
793 struct iwl_queue *q;
794 struct iwl_device_cmd *out_cmd;
795 struct iwl_cmd_meta *out_meta;
796 struct iwl_tx_cmd *tx_cmd;
797 int swq_id, txq_id;
798 dma_addr_t phys_addr;
799 dma_addr_t txcmd_phys;
800 dma_addr_t scratch_phys;
801 u16 len, len_org, firstlen, secondlen;
802 u16 seq_number = 0;
803 __le16 fc;
804 u8 hdr_len;
805 u8 sta_id;
806 u8 wait_write_ptr = 0;
807 u8 tid = 0;
808 u8 *qc = NULL;
809 unsigned long flags;
810
811 spin_lock_irqsave(&priv->lock, flags);
812 if (iwl_is_rfkill(priv)) {
813 IWL_DEBUG_DROP(priv, "Dropping - RF KILL\n");
814 goto drop_unlock;
815 }
816
817 fc = hdr->frame_control;
818
819#ifdef CONFIG_IWLWIFI_DEBUG
820 if (ieee80211_is_auth(fc))
821 IWL_DEBUG_TX(priv, "Sending AUTH frame\n");
822 else if (ieee80211_is_assoc_req(fc))
823 IWL_DEBUG_TX(priv, "Sending ASSOC frame\n");
824 else if (ieee80211_is_reassoc_req(fc))
825 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
826#endif
827
828 /* drop all non-injected data frame if we are not associated */
829 if (ieee80211_is_data(fc) &&
830 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
831 (!iwl_is_associated(priv) ||
832 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id) ||
833 !priv->assoc_station_added)) {
834 IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
835 goto drop_unlock;
836 }
837
838 hdr_len = ieee80211_hdrlen(fc);
839
840 /* Find (or create) index into station table for destination station */
841 if (info->flags & IEEE80211_TX_CTL_INJECTED)
842 sta_id = priv->hw_params.bcast_sta_id;
843 else
844 sta_id = iwl_get_sta_id(priv, hdr);
845 if (sta_id == IWL_INVALID_STATION) {
846 IWL_DEBUG_DROP(priv, "Dropping - INVALID STATION: %pM\n",
847 hdr->addr1);
848 goto drop_unlock;
849 }
850
851 IWL_DEBUG_TX(priv, "station Id %d\n", sta_id);
852
853 if (sta)
854 sta_priv = (void *)sta->drv_priv;
855
856 if (sta_priv && sta_id != priv->hw_params.bcast_sta_id &&
857 sta_priv->asleep) {
858 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE));
859 /*
860 * This sends an asynchronous command to the device,
861 * but we can rely on it being processed before the
862 * next frame is processed -- and the next frame to
863 * this station is the one that will consume this
864 * counter.
865 * For now set the counter to just 1 since we do not
866 * support uAPSD yet.
867 */
868 iwl_sta_modify_sleep_tx_count(priv, sta_id, 1);
869 }
870
871 txq_id = skb_get_queue_mapping(skb);
872 if (ieee80211_is_data_qos(fc)) {
873 qc = ieee80211_get_qos_ctl(hdr);
874 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
875 if (unlikely(tid >= MAX_TID_COUNT))
876 goto drop_unlock;
877 seq_number = priv->stations[sta_id].tid[tid].seq_number;
878 seq_number &= IEEE80211_SCTL_SEQ;
879 hdr->seq_ctrl = hdr->seq_ctrl &
880 cpu_to_le16(IEEE80211_SCTL_FRAG);
881 hdr->seq_ctrl |= cpu_to_le16(seq_number);
882 seq_number += 0x10;
883 /* aggregation is on for this <sta,tid> */
884 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
885 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
886 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
887 }
888 }
889
890 txq = &priv->txq[txq_id];
891 swq_id = txq->swq_id;
892 q = &txq->q;
893
894 if (unlikely(iwl_queue_space(q) < q->high_mark))
895 goto drop_unlock;
896
897 if (ieee80211_is_data_qos(fc))
898 priv->stations[sta_id].tid[tid].tfds_in_queue++;
899
900 /* Set up driver data for this TFD */
901 memset(&(txq->txb[q->write_ptr]), 0, sizeof(struct iwl_tx_info));
902 txq->txb[q->write_ptr].skb[0] = skb;
903
904 /* Set up first empty entry in queue's array of Tx/cmd buffers */
905 out_cmd = txq->cmd[q->write_ptr];
906 out_meta = &txq->meta[q->write_ptr];
907 tx_cmd = &out_cmd->cmd.tx;
908 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
909 memset(tx_cmd, 0, sizeof(struct iwl_tx_cmd));
910
911 /*
912 * Set up the Tx-command (not MAC!) header.
913 * Store the chosen Tx queue and TFD index within the sequence field;
914 * after Tx, uCode's Tx response will return this value so driver can
915 * locate the frame within the tx queue and do post-tx processing.
916 */
917 out_cmd->hdr.cmd = REPLY_TX;
918 out_cmd->hdr.sequence = cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
919 INDEX_TO_SEQ(q->write_ptr)));
920
921 /* Copy MAC header from skb into command buffer */
922 memcpy(tx_cmd->hdr, hdr, hdr_len);
923
924
925 /* Total # bytes to be transmitted */
926 len = (u16)skb->len;
927 tx_cmd->len = cpu_to_le16(len);
928
929 if (info->control.hw_key)
930 iwl_tx_cmd_build_hwcrypto(priv, info, tx_cmd, skb, sta_id);
931
932 /* TODO need this for burst mode later on */
933 iwl_tx_cmd_build_basic(priv, tx_cmd, info, hdr, sta_id);
934 iwl_dbg_log_tx_data_frame(priv, len, hdr);
935
936 /* set is_hcca to 0; it probably will never be implemented */
937 iwl_tx_cmd_build_rate(priv, tx_cmd, info, fc, 0);
938
939 iwl_update_stats(priv, true, fc, len);
940 /*
941 * Use the first empty entry in this queue's command buffer array
942 * to contain the Tx command and MAC header concatenated together
943 * (payload data will be in another buffer).
944 * Size of this varies, due to varying MAC header length.
945 * If end is not dword aligned, we'll have 2 extra bytes at the end
946 * of the MAC header (device reads on dword boundaries).
947 * We'll tell device about this padding later.
948 */
949 len = sizeof(struct iwl_tx_cmd) +
950 sizeof(struct iwl_cmd_header) + hdr_len;
951
952 len_org = len;
953 firstlen = len = (len + 3) & ~3;
954
955 if (len_org != len)
956 len_org = 1;
957 else
958 len_org = 0;
959
960 /* Tell NIC about any 2-byte padding after MAC header */
961 if (len_org)
962 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
963
964 /* Physical address of this Tx command's header (not MAC header!),
965 * within command buffer array. */
966 txcmd_phys = pci_map_single(priv->pci_dev,
967 &out_cmd->hdr, len,
968 PCI_DMA_BIDIRECTIONAL);
969 pci_unmap_addr_set(out_meta, mapping, txcmd_phys);
970 pci_unmap_len_set(out_meta, len, len);
971 /* Add buffer containing Tx command and MAC(!) header to TFD's
972 * first entry */
973 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
974 txcmd_phys, len, 1, 0);
975
976 if (!ieee80211_has_morefrags(hdr->frame_control)) {
977 txq->need_update = 1;
978 if (qc)
979 priv->stations[sta_id].tid[tid].seq_number = seq_number;
980 } else {
981 wait_write_ptr = 1;
982 txq->need_update = 0;
983 }
984
985 /* Set up TFD's 2nd entry to point directly to remainder of skb,
986 * if any (802.11 null frames have no payload). */
987 secondlen = len = skb->len - hdr_len;
988 if (len) {
989 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
990 len, PCI_DMA_TODEVICE);
991 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
992 phys_addr, len,
993 0, 0);
994 }
995
996 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
997 offsetof(struct iwl_tx_cmd, scratch);
998
999 len = sizeof(struct iwl_tx_cmd) +
1000 sizeof(struct iwl_cmd_header) + hdr_len;
1001 /* take back ownership of DMA buffer to enable update */
1002 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
1003 len, PCI_DMA_BIDIRECTIONAL);
1004 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
1005 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
1006
1007 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
1008 le16_to_cpu(out_cmd->hdr.sequence));
1009 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
1010 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd, sizeof(*tx_cmd));
1011 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, hdr_len);
1012
1013 /* Set up entry for this TFD in Tx byte-count array */
1014 if (info->flags & IEEE80211_TX_CTL_AMPDU)
1015 priv->cfg->ops->lib->txq_update_byte_cnt_tbl(priv, txq,
1016 le16_to_cpu(tx_cmd->len));
1017
1018 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
1019 len, PCI_DMA_BIDIRECTIONAL);
1020
1021 trace_iwlwifi_dev_tx(priv,
1022 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
1023 sizeof(struct iwl_tfd),
1024 &out_cmd->hdr, firstlen,
1025 skb->data + hdr_len, secondlen);
1026
1027 /* Tell device the write index *just past* this latest filled TFD */
1028 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
1029 iwl_txq_update_write_ptr(priv, txq);
1030 spin_unlock_irqrestore(&priv->lock, flags);
1031
1032 /*
1033 * At this point the frame is "transmitted" successfully
1034 * and we will get a TX status notification eventually,
1035 * regardless of the value of ret. "ret" only indicates
1036 * whether or not we should update the write pointer.
1037 */
1038
1039 /* avoid atomic ops if it isn't an associated client */
1040 if (sta_priv && sta_priv->client)
1041 atomic_inc(&sta_priv->pending_frames);
1042
1043 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
1044 if (wait_write_ptr) {
1045 spin_lock_irqsave(&priv->lock, flags);
1046 txq->need_update = 1;
1047 iwl_txq_update_write_ptr(priv, txq);
1048 spin_unlock_irqrestore(&priv->lock, flags);
1049 } else {
1050 iwl_stop_queue(priv, txq->swq_id);
1051 }
1052 }
1053
1054 return 0;
1055
1056drop_unlock:
1057 spin_unlock_irqrestore(&priv->lock, flags);
1058 return -1;
1059}
1060EXPORT_SYMBOL(iwl_tx_skb);
1061
1062/*************** HOST COMMAND QUEUE FUNCTIONS *****/ 418/*************** HOST COMMAND QUEUE FUNCTIONS *****/
1063 419
1064/** 420/**
@@ -1192,61 +548,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1192 return idx; 548 return idx;
1193} 549}
1194 550
1195static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
1196{
1197 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1198 struct ieee80211_sta *sta;
1199 struct iwl_station_priv *sta_priv;
1200
1201 sta = ieee80211_find_sta(priv->vif, hdr->addr1);
1202 if (sta) {
1203 sta_priv = (void *)sta->drv_priv;
1204 /* avoid atomic ops if this isn't a client */
1205 if (sta_priv->client &&
1206 atomic_dec_return(&sta_priv->pending_frames) == 0)
1207 ieee80211_sta_block_awake(priv->hw, sta, false);
1208 }
1209
1210 ieee80211_tx_status_irqsafe(priv->hw, skb);
1211}
1212
1213int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1214{
1215 struct iwl_tx_queue *txq = &priv->txq[txq_id];
1216 struct iwl_queue *q = &txq->q;
1217 struct iwl_tx_info *tx_info;
1218 int nfreed = 0;
1219 struct ieee80211_hdr *hdr;
1220
1221 if ((index >= q->n_bd) || (iwl_queue_used(q, index) == 0)) {
1222 IWL_ERR(priv, "Read index for DMA queue txq id (%d), index %d, "
1223 "is out of range [0-%d] %d %d.\n", txq_id,
1224 index, q->n_bd, q->write_ptr, q->read_ptr);
1225 return 0;
1226 }
1227
1228 for (index = iwl_queue_inc_wrap(index, q->n_bd);
1229 q->read_ptr != index;
1230 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1231
1232 tx_info = &txq->txb[txq->q.read_ptr];
1233 iwl_tx_status(priv, tx_info->skb[0]);
1234
1235 hdr = (struct ieee80211_hdr *)tx_info->skb[0]->data;
1236 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
1237 nfreed++;
1238 tx_info->skb[0] = NULL;
1239
1240 if (priv->cfg->ops->lib->txq_inval_byte_cnt_tbl)
1241 priv->cfg->ops->lib->txq_inval_byte_cnt_tbl(priv, txq);
1242
1243 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
1244 }
1245 return nfreed;
1246}
1247EXPORT_SYMBOL(iwl_tx_queue_reclaim);
1248
1249
1250/** 551/**
1251 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd 552 * iwl_hcmd_queue_reclaim - Reclaim TX command queue entries already Tx'd
1252 * 553 *
@@ -1340,7 +641,7 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1340 641
1341 if (!(meta->flags & CMD_ASYNC)) { 642 if (!(meta->flags & CMD_ASYNC)) {
1342 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 643 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1343 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n", 644 IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
1344 get_cmd_string(cmd->hdr.cmd)); 645 get_cmd_string(cmd->hdr.cmd));
1345 wake_up_interruptible(&priv->wait_command_queue); 646 wake_up_interruptible(&priv->wait_command_queue);
1346 } 647 }
@@ -1348,358 +649,37 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1348} 649}
1349EXPORT_SYMBOL(iwl_tx_cmd_complete); 650EXPORT_SYMBOL(iwl_tx_cmd_complete);
1350 651
1351/*
1352 * Find first available (lowest unused) Tx Queue, mark it "active".
1353 * Called only when finding queue for aggregation.
1354 * Should never return anything < 7, because they should already
1355 * be in use as EDCA AC (0-3), Command (4), HCCA (5, 6).
1356 */
1357static int iwl_txq_ctx_activate_free(struct iwl_priv *priv)
1358{
1359 int txq_id;
1360
1361 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++)
1362 if (!test_and_set_bit(txq_id, &priv->txq_ctx_active_msk))
1363 return txq_id;
1364 return -1;
1365}
1366
1367int iwl_tx_agg_start(struct iwl_priv *priv, const u8 *ra, u16 tid, u16 *ssn)
1368{
1369 int sta_id;
1370 int tx_fifo;
1371 int txq_id;
1372 int ret;
1373 unsigned long flags;
1374 struct iwl_tid_data *tid_data;
1375
1376 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1377 tx_fifo = default_tid_to_tx_fifo[tid];
1378 else
1379 return -EINVAL;
1380
1381 IWL_WARN(priv, "%s on ra = %pM tid = %d\n",
1382 __func__, ra, tid);
1383
1384 sta_id = iwl_find_station(priv, ra);
1385 if (sta_id == IWL_INVALID_STATION) {
1386 IWL_ERR(priv, "Start AGG on invalid station\n");
1387 return -ENXIO;
1388 }
1389 if (unlikely(tid >= MAX_TID_COUNT))
1390 return -EINVAL;
1391
1392 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_OFF) {
1393 IWL_ERR(priv, "Start AGG when state is not IWL_AGG_OFF !\n");
1394 return -ENXIO;
1395 }
1396
1397 txq_id = iwl_txq_ctx_activate_free(priv);
1398 if (txq_id == -1) {
1399 IWL_ERR(priv, "No free aggregation queue available\n");
1400 return -ENXIO;
1401 }
1402
1403 spin_lock_irqsave(&priv->sta_lock, flags);
1404 tid_data = &priv->stations[sta_id].tid[tid];
1405 *ssn = SEQ_TO_SN(tid_data->seq_number);
1406 tid_data->agg.txq_id = txq_id;
1407 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(tx_fifo, txq_id);
1408 spin_unlock_irqrestore(&priv->sta_lock, flags);
1409
1410 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
1411 sta_id, tid, *ssn);
1412 if (ret)
1413 return ret;
1414
1415 if (tid_data->tfds_in_queue == 0) {
1416 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1417 tid_data->agg.state = IWL_AGG_ON;
1418 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1419 } else {
1420 IWL_DEBUG_HT(priv, "HW queue is NOT empty: %d packets in HW queue\n",
1421 tid_data->tfds_in_queue);
1422 tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_ADDBA;
1423 }
1424 return ret;
1425}
1426EXPORT_SYMBOL(iwl_tx_agg_start);
1427
1428int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
1429{
1430 int tx_fifo_id, txq_id, sta_id, ssn = -1;
1431 struct iwl_tid_data *tid_data;
1432 int write_ptr, read_ptr;
1433 unsigned long flags;
1434
1435 if (!ra) {
1436 IWL_ERR(priv, "ra = NULL\n");
1437 return -EINVAL;
1438 }
1439
1440 if (unlikely(tid >= MAX_TID_COUNT))
1441 return -EINVAL;
1442
1443 if (likely(tid < ARRAY_SIZE(default_tid_to_tx_fifo)))
1444 tx_fifo_id = default_tid_to_tx_fifo[tid];
1445 else
1446 return -EINVAL;
1447
1448 sta_id = iwl_find_station(priv, ra);
1449
1450 if (sta_id == IWL_INVALID_STATION) {
1451 IWL_ERR(priv, "Invalid station for AGG tid %d\n", tid);
1452 return -ENXIO;
1453 }
1454
1455 if (priv->stations[sta_id].tid[tid].agg.state ==
1456 IWL_EMPTYING_HW_QUEUE_ADDBA) {
1457 IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
1458 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1459 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1460 return 0;
1461 }
1462
1463 if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
1464 IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
1465
1466 tid_data = &priv->stations[sta_id].tid[tid];
1467 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
1468 txq_id = tid_data->agg.txq_id;
1469 write_ptr = priv->txq[txq_id].q.write_ptr;
1470 read_ptr = priv->txq[txq_id].q.read_ptr;
1471
1472 /* The queue is not empty */
1473 if (write_ptr != read_ptr) {
 1474		IWL_DEBUG_HT(priv, "Stopping a non-empty AGG HW QUEUE\n");
1475 priv->stations[sta_id].tid[tid].agg.state =
1476 IWL_EMPTYING_HW_QUEUE_DELBA;
1477 return 0;
1478 }
1479
1480 IWL_DEBUG_HT(priv, "HW queue is empty\n");
1481 priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
1482
1483 spin_lock_irqsave(&priv->lock, flags);
 1484	/*
 1485	 * The only reason this call can fail is a queue number out of range,
 1486	 * which can happen if the uCode is reloaded and all the station
 1487	 * information is lost. If it is outside the range, there is no need
 1488	 * to deactivate the uCode queue; just return "success" to allow
 1489	 * mac80211 to clean up its own data.
 1490	 */
1491 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
1492 tx_fifo_id);
1493 spin_unlock_irqrestore(&priv->lock, flags);
1494
1495 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
1496
1497 return 0;
1498}
1499EXPORT_SYMBOL(iwl_tx_agg_stop);
1500
1501int iwl_txq_check_empty(struct iwl_priv *priv, int sta_id, u8 tid, int txq_id)
1502{
1503 struct iwl_queue *q = &priv->txq[txq_id].q;
1504 u8 *addr = priv->stations[sta_id].sta.sta.addr;
1505 struct iwl_tid_data *tid_data = &priv->stations[sta_id].tid[tid];
1506
1507 switch (priv->stations[sta_id].tid[tid].agg.state) {
1508 case IWL_EMPTYING_HW_QUEUE_DELBA:
1509 /* We are reclaiming the last packet of the */
1510 /* aggregated HW queue */
1511 if ((txq_id == tid_data->agg.txq_id) &&
1512 (q->read_ptr == q->write_ptr)) {
1513 u16 ssn = SEQ_TO_SN(tid_data->seq_number);
1514 int tx_fifo = default_tid_to_tx_fifo[tid];
1515 IWL_DEBUG_HT(priv, "HW queue empty: continue DELBA flow\n");
1516 priv->cfg->ops->lib->txq_agg_disable(priv, txq_id,
1517 ssn, tx_fifo);
1518 tid_data->agg.state = IWL_AGG_OFF;
1519 ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, addr, tid);
1520 }
1521 break;
1522 case IWL_EMPTYING_HW_QUEUE_ADDBA:
1523 /* We are reclaiming the last packet of the queue */
1524 if (tid_data->tfds_in_queue == 0) {
1525 IWL_DEBUG_HT(priv, "HW queue empty: continue ADDBA flow\n");
1526 tid_data->agg.state = IWL_AGG_ON;
1527 ieee80211_start_tx_ba_cb_irqsafe(priv->vif, addr, tid);
1528 }
1529 break;
1530 }
1531 return 0;
1532}
1533EXPORT_SYMBOL(iwl_txq_check_empty);
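
Taken together, iwl_tx_agg_start(), iwl_tx_agg_stop() and iwl_txq_check_empty() implement a small per-TID state machine: OFF -> EMPTYING_HW_QUEUE_ADDBA -> ON on the start side, and ON -> EMPTYING_HW_QUEUE_DELBA -> OFF on the stop side, where the EMPTYING states resolve only once the HW queue drains. A compilable sketch of just the transitions (state names shortened, error paths reduced to "no change"):

#include <stdio.h>

enum agg_state { AGG_OFF, AGG_EMPTYING_ADDBA, AGG_ON, AGG_EMPTYING_DELBA };

/* agg_start/agg_stop mirror iwl_tx_agg_start()/iwl_tx_agg_stop();
 * queue_drained mirrors what iwl_txq_check_empty() does above. */
static enum agg_state agg_start(enum agg_state s, int queue_empty)
{
	if (s != AGG_OFF)
		return s;				/* driver: -ENXIO */
	return queue_empty ? AGG_ON : AGG_EMPTYING_ADDBA;
}

static enum agg_state agg_stop(enum agg_state s, int queue_empty)
{
	if (s == AGG_EMPTYING_ADDBA)
		return AGG_OFF;				/* stop before setup done */
	return queue_empty ? AGG_OFF : AGG_EMPTYING_DELBA;
}

static enum agg_state queue_drained(enum agg_state s)
{
	if (s == AGG_EMPTYING_ADDBA)
		return AGG_ON;				/* continue ADDBA flow */
	if (s == AGG_EMPTYING_DELBA)
		return AGG_OFF;				/* continue DELBA flow */
	return s;
}

int main(void)
{
	enum agg_state s = agg_start(AGG_OFF, 0);	/* frames still queued */
	s = queue_drained(s);				/* last frame reclaimed */
	printf("%s\n", s == AGG_ON ? "AGG_ON" : "other");
	s = agg_stop(s, 1);				/* queue already empty */
	printf("%s\n", s == AGG_OFF ? "AGG_OFF" : "other");
	return 0;
}
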
1534
1535/**
1536 * iwl_tx_status_reply_compressed_ba - Update tx status from block-ack
1537 *
1538 * Go through block-ack's bitmap of ACK'd frames, update driver's record of
1539 * ACK vs. not. This gets sent to mac80211, then to rate scaling algo.
1540 */
1541static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1542 struct iwl_ht_agg *agg,
1543 struct iwl_compressed_ba_resp *ba_resp)
1544
1545{
1546 int i, sh, ack;
1547 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1548 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1549 u64 bitmap;
1550 int successes = 0;
1551 struct ieee80211_tx_info *info;
1552
1553 if (unlikely(!agg->wait_for_ba)) {
1554 IWL_ERR(priv, "Received BA when not expected\n");
1555 return -EINVAL;
1556 }
1557
1558 /* Mark that the expected block-ack response arrived */
1559 agg->wait_for_ba = 0;
1560 IWL_DEBUG_TX_REPLY(priv, "BA %d %d\n", agg->start_idx, ba_resp->seq_ctl);
1561
1562 /* Calculate shift to align block-ack bits with our Tx window bits */
1563 sh = agg->start_idx - SEQ_TO_INDEX(seq_ctl >> 4);
 1564	if (sh < 0) /* something is wrong with the indices */
1565 sh += 0x100;
1566
1567 /* don't use 64-bit values for now */
1568 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1569
1570 if (agg->frame_count > (64 - sh)) {
1571 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1572 return -1;
1573 }
1574
1575 /* check for success or failure according to the
1576 * transmitted bitmap and block-ack bitmap */
1577 bitmap &= agg->bitmap;
1578
1579 /* For each frame attempted in aggregation,
1580 * update driver's record of tx frame's status. */
1581 for (i = 0; i < agg->frame_count ; i++) {
1582 ack = bitmap & (1ULL << i);
1583 successes += !!ack;
1584 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1585 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
1586 agg->start_idx + i);
1587 }
1588
1589 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
1590 memset(&info->status, 0, sizeof(info->status));
1591 info->flags |= IEEE80211_TX_STAT_ACK;
1592 info->flags |= IEEE80211_TX_STAT_AMPDU;
1593 info->status.ampdu_ack_map = successes;
1594 info->status.ampdu_ack_len = agg->frame_count;
1595 iwl_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1596
1597 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap);
1598
1599 return 0;
1600}
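
The heart of the function above is three steps: shift the BA bitmap so it lines up with the driver's Tx window (adding 0x100 when the 8-bit sequence index space wrapped), mask it with the bitmap of frames actually sent, and count the surviving bits. A standalone sketch with invented inputs:

#include <stdio.h>
#include <stdint.h>

/* Align the block-ack bitmap with the Tx window, mask by what was
 * actually transmitted, count the ACKs. Same arithmetic as above. */
static int count_acks(uint64_t ba_bitmap, uint64_t sent_bitmap,
		      int start_idx, int ba_seq_idx, int frame_count)
{
	int sh = start_idx - ba_seq_idx;
	if (sh < 0)
		sh += 0x100;		/* sequence index space wrapped */
	if (frame_count > 64 - sh)
		return -1;		/* more frames than the bitmap covers */

	uint64_t acked = (ba_bitmap >> sh) & sent_bitmap;
	int successes = 0;
	for (int i = 0; i < frame_count; i++)
		successes += !!(acked & (1ULL << i));
	return successes;
}

int main(void)
{
	/* 4 frames sent; the receiver ACKed frames 0, 1 and 3 */
	printf("%d\n", count_acks(0xbULL, 0xfULL, 0, 0, 4));	/* 3 */
	return 0;
}
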
1601
1602/**
1603 * iwl_rx_reply_compressed_ba - Handler for REPLY_COMPRESSED_BA
1604 *
1605 * Handles block-acknowledge notification from device, which reports success
1606 * of frames sent via aggregation.
1607 */
1608void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1609 struct iwl_rx_mem_buffer *rxb)
1610{
1611 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1612 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1613 struct iwl_tx_queue *txq = NULL;
1614 struct iwl_ht_agg *agg;
1615 int index;
1616 int sta_id;
1617 int tid;
1618
1619 /* "flow" corresponds to Tx queue */
1620 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1621
1622 /* "ssn" is start of block-ack Tx window, corresponds to index
1623 * (in Tx queue's circular buffer) of first TFD/frame in window */
1624 u16 ba_resp_scd_ssn = le16_to_cpu(ba_resp->scd_ssn);
1625
1626 if (scd_flow >= priv->hw_params.max_txq_num) {
1627 IWL_ERR(priv,
1628 "BUG_ON scd_flow is bigger than number of queues\n");
1629 return;
1630 }
1631
1632 txq = &priv->txq[scd_flow];
1633 sta_id = ba_resp->sta_id;
1634 tid = ba_resp->tid;
1635 agg = &priv->stations[sta_id].tid[tid].agg;
1636
1637 /* Find index just before block-ack window */
1638 index = iwl_queue_dec_wrap(ba_resp_scd_ssn & 0xff, txq->q.n_bd);
1639
1640 /* TODO: Need to get this copy more safely - now good for debug */
1641
1642 IWL_DEBUG_TX_REPLY(priv, "REPLY_COMPRESSED_BA [%d] Received from %pM, "
1643 "sta_id = %d\n",
1644 agg->wait_for_ba,
1645 (u8 *) &ba_resp->sta_addr_lo32,
1646 ba_resp->sta_id);
1647 IWL_DEBUG_TX_REPLY(priv, "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = "
1648 "%d, scd_ssn = %d\n",
1649 ba_resp->tid,
1650 ba_resp->seq_ctl,
1651 (unsigned long long)le64_to_cpu(ba_resp->bitmap),
1652 ba_resp->scd_flow,
1653 ba_resp->scd_ssn);
 1654	IWL_DEBUG_TX_REPLY(priv, "DAT start_idx = %d, bitmap = 0x%llx\n",
1655 agg->start_idx,
1656 (unsigned long long)agg->bitmap);
1657
1658 /* Update driver's record of ACK vs. not for each frame in window */
1659 iwl_tx_status_reply_compressed_ba(priv, agg, ba_resp);
1660
1661 /* Release all TFDs before the SSN, i.e. all TFDs in front of
1662 * block-ack window (we assume that they've been successfully
1663 * transmitted ... if not, it's too late anyway). */
1664 if (txq->q.read_ptr != (ba_resp_scd_ssn & 0xff)) {
1665 /* calculate mac80211 ampdu sw queue to wake */
1666 int freed = iwl_tx_queue_reclaim(priv, scd_flow, index);
1667 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
1668
1669 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1670 priv->mac80211_registered &&
1671 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1672 iwl_wake_queue(priv, txq->swq_id);
1673
1674 iwl_txq_check_empty(priv, sta_id, tid, scd_flow);
1675 }
1676}
1677EXPORT_SYMBOL(iwl_rx_reply_compressed_ba);
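
The index just before the BA window is computed with iwl_queue_dec_wrap(), i.e. a decrement that wraps around the ring of n_bd descriptors. A sketch of the intent (the driver's version assumes n_bd is a power of two and uses masking; this one spells the wrap out):

#include <stdio.h>

/* "One before index i" in a ring of n_bd descriptors must wrap to the
 * top when i == 0. For a power-of-two n_bd this equals (i - 1) & (n_bd - 1). */
static int queue_dec_wrap(int index, int n_bd)
{
	return index == 0 ? n_bd - 1 : index - 1;
}

int main(void)
{
	printf("%d\n", queue_dec_wrap(0, 256));		/* 255 */
	printf("%d\n", queue_dec_wrap(17, 256));	/* 16 */
	return 0;
}
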
1678
1679#ifdef CONFIG_IWLWIFI_DEBUG 652#ifdef CONFIG_IWLWIFI_DEBUG
1680#define TX_STATUS_ENTRY(x) case TX_STATUS_FAIL_ ## x: return #x 653#define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
654#define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1681 655
1682const char *iwl_get_tx_fail_reason(u32 status) 656const char *iwl_get_tx_fail_reason(u32 status)
1683{ 657{
1684 switch (status & TX_STATUS_MSK) { 658 switch (status & TX_STATUS_MSK) {
1685 case TX_STATUS_SUCCESS: 659 case TX_STATUS_SUCCESS:
1686 return "SUCCESS"; 660 return "SUCCESS";
1687 TX_STATUS_ENTRY(SHORT_LIMIT); 661 TX_STATUS_POSTPONE(DELAY);
1688 TX_STATUS_ENTRY(LONG_LIMIT); 662 TX_STATUS_POSTPONE(FEW_BYTES);
1689 TX_STATUS_ENTRY(FIFO_UNDERRUN); 663 TX_STATUS_POSTPONE(BT_PRIO);
1690 TX_STATUS_ENTRY(MGMNT_ABORT); 664 TX_STATUS_POSTPONE(QUIET_PERIOD);
1691 TX_STATUS_ENTRY(NEXT_FRAG); 665 TX_STATUS_POSTPONE(CALC_TTAK);
1692 TX_STATUS_ENTRY(LIFE_EXPIRE); 666 TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1693 TX_STATUS_ENTRY(DEST_PS); 667 TX_STATUS_FAIL(SHORT_LIMIT);
1694 TX_STATUS_ENTRY(ABORTED); 668 TX_STATUS_FAIL(LONG_LIMIT);
1695 TX_STATUS_ENTRY(BT_RETRY); 669 TX_STATUS_FAIL(FIFO_UNDERRUN);
1696 TX_STATUS_ENTRY(STA_INVALID); 670 TX_STATUS_FAIL(DRAIN_FLOW);
1697 TX_STATUS_ENTRY(FRAG_DROPPED); 671 TX_STATUS_FAIL(RFKILL_FLUSH);
1698 TX_STATUS_ENTRY(TID_DISABLE); 672 TX_STATUS_FAIL(LIFE_EXPIRE);
1699 TX_STATUS_ENTRY(FRAME_FLUSHED); 673 TX_STATUS_FAIL(DEST_PS);
1700 TX_STATUS_ENTRY(INSUFFICIENT_CF_POLL); 674 TX_STATUS_FAIL(HOST_ABORTED);
1701 TX_STATUS_ENTRY(TX_LOCKED); 675 TX_STATUS_FAIL(BT_RETRY);
1702 TX_STATUS_ENTRY(NO_BEACON_ON_RADAR); 676 TX_STATUS_FAIL(STA_INVALID);
677 TX_STATUS_FAIL(FRAG_DROPPED);
678 TX_STATUS_FAIL(TID_DISABLE);
679 TX_STATUS_FAIL(FIFO_FLUSHED);
680 TX_STATUS_FAIL(INSUFFICIENT_CF_POLL);
681 TX_STATUS_FAIL(FW_DROP);
682 TX_STATUS_FAIL(STA_COLOR_MISMATCH_DROP);
1703 } 683 }
1704 684
1705 return "UNKNOWN"; 685 return "UNKNOWN";
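
The status table relies on the preprocessor: TX_STATUS_FAIL(x) pastes the token to form the case label and stringizes it for the return value, so each status is written exactly once. The same trick in a self-contained program (the enum values are invented for illustration):

#include <stdio.h>

enum { ERR_SHORT_LIMIT = 1, ERR_LONG_LIMIT, ERR_FIFO_UNDERRUN };

/* Token pasting (##) builds the case label, stringizing (#) the name. */
#define ERR_ENTRY(x) case ERR_ ## x: return #x

static const char *err_name(int status)
{
	switch (status) {
	ERR_ENTRY(SHORT_LIMIT);
	ERR_ENTRY(LONG_LIMIT);
	ERR_ENTRY(FIFO_UNDERRUN);
	}
	return "UNKNOWN";
}

int main(void)
{
	printf("%s\n", err_name(ERR_LONG_LIMIT));	/* LONG_LIMIT */
	return 0;
}
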
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index b55e4f39a9e1..9f362024a29c 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -352,11 +352,11 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
352 352
353static void iwl3945_unset_hw_params(struct iwl_priv *priv) 353static void iwl3945_unset_hw_params(struct iwl_priv *priv)
354{ 354{
355 if (priv->shared_virt) 355 if (priv->_3945.shared_virt)
356 dma_free_coherent(&priv->pci_dev->dev, 356 dma_free_coherent(&priv->pci_dev->dev,
357 sizeof(struct iwl3945_shared), 357 sizeof(struct iwl3945_shared),
358 priv->shared_virt, 358 priv->_3945.shared_virt,
359 priv->shared_phys); 359 priv->_3945.shared_phys);
360} 360}
361 361
362static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv, 362static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@@ -505,15 +505,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
505 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n"); 505 IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
506#endif 506#endif
507 507
508 /* drop all non-injected data frame if we are not associated */
509 if (ieee80211_is_data(fc) &&
510 !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
511 (!iwl_is_associated(priv) ||
512 ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
513 IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
514 goto drop_unlock;
515 }
516
517 spin_unlock_irqrestore(&priv->lock, flags); 508 spin_unlock_irqrestore(&priv->lock, flags);
518 509
519 hdr_len = ieee80211_hdrlen(fc); 510 hdr_len = ieee80211_hdrlen(fc);
@@ -607,9 +598,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
607 txq->need_update = 0; 598 txq->need_update = 0;
608 } 599 }
609 600
610 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", 601 IWL_DEBUG_TX(priv, "sequence nr = 0X%x\n",
611 le16_to_cpu(out_cmd->hdr.sequence)); 602 le16_to_cpu(out_cmd->hdr.sequence));
612 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags)); 603 IWL_DEBUG_TX(priv, "tx_flags = 0X%x\n", le32_to_cpu(tx_cmd->tx_flags));
613 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd)); 604 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
614 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr, 605 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
615 ieee80211_hdrlen(fc)); 606 ieee80211_hdrlen(fc));
@@ -754,7 +745,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
754 if (iwl_is_associated(priv)) 745 if (iwl_is_associated(priv))
755 add_time = 746 add_time =
756 iwl3945_usecs_to_beacons( 747 iwl3945_usecs_to_beacons(
757 le64_to_cpu(params->start_time) - priv->last_tsf, 748 le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
758 le16_to_cpu(priv->rxon_timing.beacon_interval)); 749 le16_to_cpu(priv->rxon_timing.beacon_interval));
759 750
760 memset(&spectrum, 0, sizeof(spectrum)); 751 memset(&spectrum, 0, sizeof(spectrum));
@@ -768,7 +759,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
768 759
769 if (iwl_is_associated(priv)) 760 if (iwl_is_associated(priv))
770 spectrum.start_time = 761 spectrum.start_time =
771 iwl3945_add_beacon_time(priv->last_beacon_time, 762 iwl3945_add_beacon_time(priv->_3945.last_beacon_time,
772 add_time, 763 add_time,
773 le16_to_cpu(priv->rxon_timing.beacon_interval)); 764 le16_to_cpu(priv->rxon_timing.beacon_interval));
774 else 765 else
@@ -1613,9 +1604,6 @@ static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
1613 return pos; 1604 return pos;
1614} 1605}
1615 1606
1616/* For sanity check only. Actual size is determined by uCode, typ. 512 */
1617#define IWL3945_MAX_EVENT_LOG_SIZE (512)
1618
1619#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20) 1607#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
1620 1608
1621int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log, 1609int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
@@ -1642,16 +1630,16 @@ int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
1642 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32))); 1630 num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
1643 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32))); 1631 next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
1644 1632
1645 if (capacity > IWL3945_MAX_EVENT_LOG_SIZE) { 1633 if (capacity > priv->cfg->max_event_log_size) {
1646 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n", 1634 IWL_ERR(priv, "Log capacity %d is bogus, limit to %d entries\n",
1647 capacity, IWL3945_MAX_EVENT_LOG_SIZE); 1635 capacity, priv->cfg->max_event_log_size);
1648 capacity = IWL3945_MAX_EVENT_LOG_SIZE; 1636 capacity = priv->cfg->max_event_log_size;
1649 } 1637 }
1650 1638
1651 if (next_entry > IWL3945_MAX_EVENT_LOG_SIZE) { 1639 if (next_entry > priv->cfg->max_event_log_size) {
1652 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n", 1640 IWL_ERR(priv, "Log write index %d is bogus, limit to %d\n",
1653 next_entry, IWL3945_MAX_EVENT_LOG_SIZE); 1641 next_entry, priv->cfg->max_event_log_size);
1654 next_entry = IWL3945_MAX_EVENT_LOG_SIZE; 1642 next_entry = priv->cfg->max_event_log_size;
1655 } 1643 }
1656 1644
1657 size = num_wraps ? capacity : next_entry; 1645 size = num_wraps ? capacity : next_entry;
@@ -1947,7 +1935,7 @@ static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
1947 added++; 1935 added++;
1948 } 1936 }
1949 1937
1950 IWL_DEBUG_SCAN(priv, "total channels to scan %d \n", added); 1938 IWL_DEBUG_SCAN(priv, "total channels to scan %d\n", added);
1951 return added; 1939 return added;
1952} 1940}
1953 1941
@@ -2490,8 +2478,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2490 goto restart; 2478 goto restart;
2491 } 2479 }
2492 2480
2493 iwl_clear_stations_table(priv);
2494
2495 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG); 2481 rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
2496 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill); 2482 IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
2497 2483
@@ -2513,13 +2499,19 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2513 /* After the ALIVE response, we can send commands to 3945 uCode */ 2499 /* After the ALIVE response, we can send commands to 3945 uCode */
2514 set_bit(STATUS_ALIVE, &priv->status); 2500 set_bit(STATUS_ALIVE, &priv->status);
2515 2501
2502 if (priv->cfg->ops->lib->recover_from_tx_stall) {
2503 /* Enable timer to monitor the driver queues */
2504 mod_timer(&priv->monitor_recover,
2505 jiffies +
2506 msecs_to_jiffies(priv->cfg->monitor_recover_period));
2507 }
2508
2516 if (iwl_is_rfkill(priv)) 2509 if (iwl_is_rfkill(priv))
2517 return; 2510 return;
2518 2511
2519 ieee80211_wake_queues(priv->hw); 2512 ieee80211_wake_queues(priv->hw);
2520 2513
2521 priv->active_rate = priv->rates_mask; 2514 priv->active_rate = IWL_RATES_MASK;
2522 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
2523 2515
2524 iwl_power_update_mode(priv, true); 2516 iwl_power_update_mode(priv, true);
2525 2517
@@ -2548,17 +2540,6 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2548 set_bit(STATUS_READY, &priv->status); 2540 set_bit(STATUS_READY, &priv->status);
2549 wake_up_interruptible(&priv->wait_command_queue); 2541 wake_up_interruptible(&priv->wait_command_queue);
2550 2542
2551 /* reassociate for ADHOC mode */
2552 if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
2553 struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
2554 priv->vif);
2555 if (beacon)
2556 iwl_mac_beacon_update(priv->hw, beacon);
2557 }
2558
2559 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
2560 iwl_set_mode(priv, priv->iw_mode);
2561
2562 return; 2543 return;
2563 2544
2564 restart: 2545 restart:
@@ -2580,7 +2561,8 @@ static void __iwl3945_down(struct iwl_priv *priv)
2580 if (!exit_pending) 2561 if (!exit_pending)
2581 set_bit(STATUS_EXIT_PENDING, &priv->status); 2562 set_bit(STATUS_EXIT_PENDING, &priv->status);
2582 2563
2583 iwl_clear_stations_table(priv); 2564 /* Station information will now be cleared in device */
2565 iwl_clear_ucode_stations(priv, true);
2584 2566
2585 /* Unblock any waiting calls */ 2567 /* Unblock any waiting calls */
2586 wake_up_interruptible_all(&priv->wait_command_queue); 2568 wake_up_interruptible_all(&priv->wait_command_queue);
@@ -2714,12 +2696,10 @@ static int __iwl3945_up(struct iwl_priv *priv)
2714 2696
2715 for (i = 0; i < MAX_HW_RESTARTS; i++) { 2697 for (i = 0; i < MAX_HW_RESTARTS; i++) {
2716 2698
2717 iwl_clear_stations_table(priv);
2718
2719 /* load bootstrap state machine, 2699 /* load bootstrap state machine,
2720 * load bootstrap program into processor's memory, 2700 * load bootstrap program into processor's memory,
2721 * prepare to load the "initialize" uCode */ 2701 * prepare to load the "initialize" uCode */
2722 priv->cfg->ops->lib->load_ucode(priv); 2702 rc = priv->cfg->ops->lib->load_ucode(priv);
2723 2703
2724 if (rc) { 2704 if (rc) {
2725 IWL_ERR(priv, 2705 IWL_ERR(priv,
@@ -2787,7 +2767,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
2787static void iwl3945_rfkill_poll(struct work_struct *data) 2767static void iwl3945_rfkill_poll(struct work_struct *data)
2788{ 2768{
2789 struct iwl_priv *priv = 2769 struct iwl_priv *priv =
2790 container_of(data, struct iwl_priv, rfkill_poll.work); 2770 container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
2791 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status); 2771 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2792 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL) 2772 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2793 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW); 2773 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
@@ -2806,7 +2786,7 @@ static void iwl3945_rfkill_poll(struct work_struct *data)
2806 2786
2807 /* Keep this running, even if radio now enabled. This will be 2787 /* Keep this running, even if radio now enabled. This will be
2808 * cancelled in mac_start() if system decides to start again */ 2788 * cancelled in mac_start() if system decides to start again */
2809 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 2789 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
2810 round_jiffies_relative(2 * HZ)); 2790 round_jiffies_relative(2 * HZ));
2811 2791
2812} 2792}
@@ -2821,7 +2801,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2821 .len = sizeof(struct iwl3945_scan_cmd), 2801 .len = sizeof(struct iwl3945_scan_cmd),
2822 .flags = CMD_SIZE_HUGE, 2802 .flags = CMD_SIZE_HUGE,
2823 }; 2803 };
2824 int rc = 0;
2825 struct iwl3945_scan_cmd *scan; 2804 struct iwl3945_scan_cmd *scan;
2826 struct ieee80211_conf *conf = NULL; 2805 struct ieee80211_conf *conf = NULL;
2827 u8 n_probes = 0; 2806 u8 n_probes = 0;
@@ -2849,7 +2828,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2849 if (test_bit(STATUS_SCAN_HW, &priv->status)) { 2828 if (test_bit(STATUS_SCAN_HW, &priv->status)) {
2850 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests " 2829 IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests "
2851 "Ignoring second request.\n"); 2830 "Ignoring second request.\n");
2852 rc = -EIO;
2853 goto done; 2831 goto done;
2854 } 2832 }
2855 2833
@@ -2884,7 +2862,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2884 priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) + 2862 priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
2885 IWL_MAX_SCAN_SIZE, GFP_KERNEL); 2863 IWL_MAX_SCAN_SIZE, GFP_KERNEL);
2886 if (!priv->scan) { 2864 if (!priv->scan) {
2887 rc = -ENOMEM; 2865 IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
2888 goto done; 2866 goto done;
2889 } 2867 }
2890 } 2868 }
@@ -2927,7 +2905,9 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2927 scan_suspend_time, interval); 2905 scan_suspend_time, interval);
2928 } 2906 }
2929 2907
2930 if (priv->scan_request->n_ssids) { 2908 if (priv->is_internal_short_scan) {
2909 IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
2910 } else if (priv->scan_request->n_ssids) {
2931 int i, p = 0; 2911 int i, p = 0;
2932 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n"); 2912 IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
2933 for (i = 0; i < priv->scan_request->n_ssids; i++) { 2913 for (i = 0; i < priv->scan_request->n_ssids; i++) {
@@ -2974,13 +2954,20 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
2974 goto done; 2954 goto done;
2975 } 2955 }
2976 2956
2977 scan->tx_cmd.len = cpu_to_le16( 2957 if (!priv->is_internal_short_scan) {
2958 scan->tx_cmd.len = cpu_to_le16(
2978 iwl_fill_probe_req(priv, 2959 iwl_fill_probe_req(priv,
2979 (struct ieee80211_mgmt *)scan->data, 2960 (struct ieee80211_mgmt *)scan->data,
2980 priv->scan_request->ie, 2961 priv->scan_request->ie,
2981 priv->scan_request->ie_len, 2962 priv->scan_request->ie_len,
2982 IWL_MAX_SCAN_SIZE - sizeof(*scan))); 2963 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
2983 2964 } else {
2965 scan->tx_cmd.len = cpu_to_le16(
2966 iwl_fill_probe_req(priv,
2967 (struct ieee80211_mgmt *)scan->data,
2968 NULL, 0,
2969 IWL_MAX_SCAN_SIZE - sizeof(*scan)));
2970 }
2984 /* select Rx antennas */ 2971 /* select Rx antennas */
2985 scan->flags |= iwl3945_get_antenna_flags(priv); 2972 scan->flags |= iwl3945_get_antenna_flags(priv);
2986 2973
@@ -3002,8 +2989,7 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
3002 scan->len = cpu_to_le16(cmd.len); 2989 scan->len = cpu_to_le16(cmd.len);
3003 2990
3004 set_bit(STATUS_SCAN_HW, &priv->status); 2991 set_bit(STATUS_SCAN_HW, &priv->status);
3005 rc = iwl_send_cmd_sync(priv, &cmd); 2992 if (iwl_send_cmd_sync(priv, &cmd))
3006 if (rc)
3007 goto done; 2993 goto done;
3008 2994
3009 queue_delayed_work(priv->workqueue, &priv->scan_check, 2995 queue_delayed_work(priv->workqueue, &priv->scan_check,
@@ -3135,12 +3121,13 @@ void iwl3945_post_associate(struct iwl_priv *priv)
3135 case NL80211_IFTYPE_ADHOC: 3121 case NL80211_IFTYPE_ADHOC:
3136 3122
3137 priv->assoc_id = 1; 3123 priv->assoc_id = 1;
3138 iwl_add_station(priv, priv->bssid, 0, CMD_SYNC, NULL); 3124 iwl_add_local_station(priv, priv->bssid, false);
3139 iwl3945_sync_sta(priv, IWL_STA_ID, 3125 iwl3945_sync_sta(priv, IWL_STA_ID,
3140 (priv->band == IEEE80211_BAND_5GHZ) ? 3126 (priv->band == IEEE80211_BAND_5GHZ) ?
3141 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP, 3127 IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
3142 CMD_ASYNC); 3128 CMD_ASYNC);
3143 iwl3945_rate_scale_init(priv->hw, IWL_STA_ID); 3129 iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
3130
3144 iwl3945_send_beacon_cmd(priv); 3131 iwl3945_send_beacon_cmd(priv);
3145 3132
3146 break; 3133 break;
@@ -3151,8 +3138,6 @@ void iwl3945_post_associate(struct iwl_priv *priv)
3151 break; 3138 break;
3152 } 3139 }
3153 3140
3154 iwl_activate_qos(priv, 0);
3155
3156 /* we have just associated, don't start scan too early */ 3141 /* we have just associated, don't start scan too early */
3157 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN; 3142 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
3158} 3143}
@@ -3213,7 +3198,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
3213 3198
3214 /* ucode is running and will send rfkill notifications, 3199 /* ucode is running and will send rfkill notifications,
3215 * no need to poll the killswitch state anymore */ 3200 * no need to poll the killswitch state anymore */
3216 cancel_delayed_work(&priv->rfkill_poll); 3201 cancel_delayed_work(&priv->_3945.rfkill_poll);
3217 3202
3218 iwl_led_start(priv); 3203 iwl_led_start(priv);
3219 3204
@@ -3254,7 +3239,7 @@ static void iwl3945_mac_stop(struct ieee80211_hw *hw)
3254 flush_workqueue(priv->workqueue); 3239 flush_workqueue(priv->workqueue);
3255 3240
3256 /* start polling the killswitch state again */ 3241 /* start polling the killswitch state again */
3257 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 3242 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
3258 round_jiffies_relative(2 * HZ)); 3243 round_jiffies_relative(2 * HZ));
3259 3244
3260 IWL_DEBUG_MAC80211(priv, "leave\n"); 3245 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -3325,7 +3310,7 @@ void iwl3945_config_ap(struct iwl_priv *priv)
3325 /* restore RXON assoc */ 3310 /* restore RXON assoc */
3326 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK; 3311 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
3327 iwlcore_commit_rxon(priv); 3312 iwlcore_commit_rxon(priv);
3328 iwl_add_station(priv, iwl_bcast_addr, 0, CMD_SYNC, NULL); 3313 iwl_add_local_station(priv, iwl_bcast_addr, false);
3329 } 3314 }
3330 iwl3945_send_beacon_cmd(priv); 3315 iwl3945_send_beacon_cmd(priv);
3331 3316
@@ -3366,7 +3351,6 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3366 3351
3367 mutex_lock(&priv->mutex); 3352 mutex_lock(&priv->mutex);
3368 iwl_scan_cancel_timeout(priv, 100); 3353 iwl_scan_cancel_timeout(priv, 100);
3369 mutex_unlock(&priv->mutex);
3370 3354
3371 switch (cmd) { 3355 switch (cmd) {
3372 case SET_KEY: 3356 case SET_KEY:
@@ -3387,11 +3371,44 @@ static int iwl3945_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3387 ret = -EINVAL; 3371 ret = -EINVAL;
3388 } 3372 }
3389 3373
3374 mutex_unlock(&priv->mutex);
3390 IWL_DEBUG_MAC80211(priv, "leave\n"); 3375 IWL_DEBUG_MAC80211(priv, "leave\n");
3391 3376
3392 return ret; 3377 return ret;
3393} 3378}
3394 3379
3380static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
3381 struct ieee80211_vif *vif,
3382 struct ieee80211_sta *sta)
3383{
3384 struct iwl_priv *priv = hw->priv;
3385 int ret;
3386 bool is_ap = priv->iw_mode == NL80211_IFTYPE_STATION;
3387 u8 sta_id;
3388
3389 IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
3390 sta->addr);
3391
3392 ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
3393 &sta_id);
3394 if (ret) {
3395 IWL_ERR(priv, "Unable to add station %pM (%d)\n",
3396 sta->addr, ret);
 3397		/* Should we return success if the return code is EEXIST? */
3398 return ret;
3399 }
3400
3401 /* Initialize rate scaling */
3402 IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM\n",
3403 sta->addr);
3404 iwl3945_rs_rate_init(priv, sta, sta_id);
3405
3406 return 0;
3411}
3395/***************************************************************************** 3412/*****************************************************************************
3396 * 3413 *
3397 * sysfs attributes 3414 * sysfs attributes
@@ -3591,7 +3608,7 @@ static ssize_t store_measurement(struct device *d,
3591 struct iwl_priv *priv = dev_get_drvdata(d); 3608 struct iwl_priv *priv = dev_get_drvdata(d);
3592 struct ieee80211_measurement_params params = { 3609 struct ieee80211_measurement_params params = {
3593 .channel = le16_to_cpu(priv->active_rxon.channel), 3610 .channel = le16_to_cpu(priv->active_rxon.channel),
3594 .start_time = cpu_to_le64(priv->last_tsf), 3611 .start_time = cpu_to_le64(priv->_3945.last_tsf),
3595 .duration = cpu_to_le16(1), 3612 .duration = cpu_to_le16(1),
3596 }; 3613 };
3597 u8 type = IWL_MEASURE_BASIC; 3614 u8 type = IWL_MEASURE_BASIC;
@@ -3661,7 +3678,7 @@ static ssize_t show_statistics(struct device *d,
3661 struct iwl_priv *priv = dev_get_drvdata(d); 3678 struct iwl_priv *priv = dev_get_drvdata(d);
3662 u32 size = sizeof(struct iwl3945_notif_statistics); 3679 u32 size = sizeof(struct iwl3945_notif_statistics);
3663 u32 len = 0, ofs = 0; 3680 u32 len = 0, ofs = 0;
3664 u8 *data = (u8 *)&priv->statistics_39; 3681 u8 *data = (u8 *)&priv->_3945.statistics;
3665 int rc = 0; 3682 int rc = 0;
3666 3683
3667 if (!iwl_is_alive(priv)) 3684 if (!iwl_is_alive(priv))
@@ -3774,7 +3791,7 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3774 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update); 3791 INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
3775 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start); 3792 INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
3776 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start); 3793 INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
3777 INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll); 3794 INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
3778 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed); 3795 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
3779 INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan); 3796 INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
3780 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan); 3797 INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
@@ -3782,6 +3799,13 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3782 3799
3783 iwl3945_hw_setup_deferred_work(priv); 3800 iwl3945_hw_setup_deferred_work(priv);
3784 3801
3802 if (priv->cfg->ops->lib->recover_from_tx_stall) {
3803 init_timer(&priv->monitor_recover);
3804 priv->monitor_recover.data = (unsigned long)priv;
3805 priv->monitor_recover.function =
3806 priv->cfg->ops->lib->recover_from_tx_stall;
3807 }
3808
3785 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3809 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3786 iwl3945_irq_tasklet, (unsigned long)priv); 3810 iwl3945_irq_tasklet, (unsigned long)priv);
3787} 3811}
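
The monitor_recover timer is only set up when the per-chip ops table provides a recover_from_tx_stall handler, so chips without stall recovery pay nothing. A reduced sketch of that guard-then-arm pattern (the struct names and the void* callback signature are simplifications for illustration, not the kernel timer API):

#include <stdio.h>

struct lib_ops { void (*recover_from_tx_stall)(void *data); };

struct timer { void (*function)(void *); void *data; int armed; };

/* Arm the watchdog only when this chip's ops table provides a handler,
 * as the hunk above does for monitor_recover. */
static void setup_monitor(struct timer *t, const struct lib_ops *ops,
			  void *priv)
{
	if (!ops->recover_from_tx_stall)
		return;
	t->function = ops->recover_from_tx_stall;
	t->data = priv;
	t->armed = 1;
}

static void recover(void *data) { (void)data; puts("tx stall: recovering"); }

int main(void)
{
	struct lib_ops ops = { .recover_from_tx_stall = recover };
	struct timer t = { 0 };
	setup_monitor(&t, &ops, &ops);
	if (t.armed)
		t.function(t.data);	/* what the kernel timer would invoke */
	return 0;
}
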
@@ -3794,6 +3818,8 @@ static void iwl3945_cancel_deferred_work(struct iwl_priv *priv)
3794 cancel_delayed_work(&priv->scan_check); 3818 cancel_delayed_work(&priv->scan_check);
3795 cancel_delayed_work(&priv->alive_start); 3819 cancel_delayed_work(&priv->alive_start);
3796 cancel_work_sync(&priv->beacon_update); 3820 cancel_work_sync(&priv->beacon_update);
3821 if (priv->cfg->ops->lib->recover_from_tx_stall)
3822 del_timer_sync(&priv->monitor_recover);
3797} 3823}
3798 3824
3799static struct attribute *iwl3945_sysfs_entries[] = { 3825static struct attribute *iwl3945_sysfs_entries[] = {
@@ -3831,7 +3857,9 @@ static struct ieee80211_ops iwl3945_hw_ops = {
3831 .conf_tx = iwl_mac_conf_tx, 3857 .conf_tx = iwl_mac_conf_tx,
3832 .reset_tsf = iwl_mac_reset_tsf, 3858 .reset_tsf = iwl_mac_reset_tsf,
3833 .bss_info_changed = iwl_bss_info_changed, 3859 .bss_info_changed = iwl_bss_info_changed,
3834 .hw_scan = iwl_mac_hw_scan 3860 .hw_scan = iwl_mac_hw_scan,
3861 .sta_add = iwl3945_mac_sta_add,
3862 .sta_remove = iwl_mac_sta_remove,
3835}; 3863};
3836 3864
3837static int iwl3945_init_drv(struct iwl_priv *priv) 3865static int iwl3945_init_drv(struct iwl_priv *priv)
@@ -3850,9 +3878,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3850 mutex_init(&priv->mutex); 3878 mutex_init(&priv->mutex);
3851 mutex_init(&priv->sync_cmd_mutex); 3879 mutex_init(&priv->sync_cmd_mutex);
3852 3880
3853 /* Clear the driver's (not device's) station table */
3854 iwl_clear_stations_table(priv);
3855
3856 priv->ieee_channels = NULL; 3881 priv->ieee_channels = NULL;
3857 priv->ieee_rates = NULL; 3882 priv->ieee_rates = NULL;
3858 priv->band = IEEE80211_BAND_2GHZ; 3883 priv->band = IEEE80211_BAND_2GHZ;
@@ -3860,12 +3885,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3860 priv->iw_mode = NL80211_IFTYPE_STATION; 3885 priv->iw_mode = NL80211_IFTYPE_STATION;
3861 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; 3886 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3862 3887
3863 iwl_reset_qos(priv);
3864
3865 priv->qos_data.qos_active = 0;
3866 priv->qos_data.qos_cap.val = 0;
3867
3868 priv->rates_mask = IWL_RATES_MASK;
3869 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; 3888 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
3870 3889
3871 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { 3890 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
@@ -4130,7 +4149,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4130 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err); 4149 IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
4131 4150
4132 /* Start monitoring the killswitch */ 4151 /* Start monitoring the killswitch */
4133 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 4152 queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
4134 2 * HZ); 4153 2 * HZ);
4135 4154
4136 return 0; 4155 return 0;
@@ -4204,7 +4223,7 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4204 4223
4205 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group); 4224 sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
4206 4225
4207 cancel_delayed_work_sync(&priv->rfkill_poll); 4226 cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
4208 4227
4209 iwl3945_dealloc_ucode_pci(priv); 4228 iwl3945_dealloc_ucode_pci(priv);
4210 4229
@@ -4213,7 +4232,6 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4213 iwl3945_hw_txq_ctx_free(priv); 4232 iwl3945_hw_txq_ctx_free(priv);
4214 4233
4215 iwl3945_unset_hw_params(priv); 4234 iwl3945_unset_hw_params(priv);
4216 iwl_clear_stations_table(priv);
4217 4235
4218 /*netif_stop_queue(dev); */ 4236 /*netif_stop_queue(dev); */
4219 flush_workqueue(priv->workqueue); 4237 flush_workqueue(priv->workqueue);
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index b9d34a766964..03f998d098c5 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -17,7 +17,7 @@ config IWM
17config IWM_DEBUG 17config IWM_DEBUG
18 bool "Enable full debugging output in iwmc3200wifi" 18 bool "Enable full debugging output in iwmc3200wifi"
19 depends on IWM && DEBUG_FS 19 depends on IWM && DEBUG_FS
20 ---help--- 20 help
21 This option will enable debug tracing and setting for iwm 21 This option will enable debug tracing and setting for iwm
22 22
23 You can set the debug level and module through debugfs. By 23 You can set the debug level and module through debugfs. By
@@ -30,3 +30,10 @@ config IWM_DEBUG
30 Or, if you want the full debug, for all modules: 30 Or, if you want the full debug, for all modules:
31 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level 31 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/level
32 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules 32 echo 0xff > /sys/kernel/debug/iwm/phyN/debug/modules
33
34config IWM_TRACING
35 bool "Enable event tracing for iwmc3200wifi"
36 depends on IWM && EVENT_TRACING
37 help
38 Say Y here to trace all the commands and responses between
39 the driver and firmware (including TX/RX frames) with ftrace.
diff --git a/drivers/net/wireless/iwmc3200wifi/Makefile b/drivers/net/wireless/iwmc3200wifi/Makefile
index d34291b652d3..aeed5cd80819 100644
--- a/drivers/net/wireless/iwmc3200wifi/Makefile
+++ b/drivers/net/wireless/iwmc3200wifi/Makefile
@@ -3,3 +3,6 @@ iwmc3200wifi-objs += main.o netdev.o rx.o tx.o sdio.o hal.o fw.o
3iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o 3iwmc3200wifi-objs += commands.o cfg80211.o eeprom.o
4 4
5iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o 5iwmc3200wifi-$(CONFIG_IWM_DEBUG) += debugfs.o
6iwmc3200wifi-$(CONFIG_IWM_TRACING) += trace.o
7
8CFLAGS_trace.o := -I$(src)
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a1d45cce0ebc..902e95f70f6e 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -264,7 +264,7 @@ static int iwm_cfg80211_get_station(struct wiphy *wiphy,
264int iwm_cfg80211_inform_bss(struct iwm_priv *iwm) 264int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
265{ 265{
266 struct wiphy *wiphy = iwm_to_wiphy(iwm); 266 struct wiphy *wiphy = iwm_to_wiphy(iwm);
267 struct iwm_bss_info *bss, *next; 267 struct iwm_bss_info *bss;
268 struct iwm_umac_notif_bss_info *umac_bss; 268 struct iwm_umac_notif_bss_info *umac_bss;
269 struct ieee80211_mgmt *mgmt; 269 struct ieee80211_mgmt *mgmt;
270 struct ieee80211_channel *channel; 270 struct ieee80211_channel *channel;
@@ -272,7 +272,7 @@ int iwm_cfg80211_inform_bss(struct iwm_priv *iwm)
272 s32 signal; 272 s32 signal;
273 int freq; 273 int freq;
274 274
275 list_for_each_entry_safe(bss, next, &iwm->bss_list, node) { 275 list_for_each_entry(bss, &iwm->bss_list, node) {
276 umac_bss = bss->bss; 276 umac_bss = bss->bss;
277 mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf); 277 mgmt = (struct ieee80211_mgmt *)(umac_bss->frame_buf);
278 278
@@ -726,23 +726,26 @@ static int iwm_cfg80211_set_power_mgmt(struct wiphy *wiphy,
726 CFG_POWER_INDEX, iwm->conf.power_index); 726 CFG_POWER_INDEX, iwm->conf.power_index);
727} 727}
728 728
729int iwm_cfg80211_set_pmksa(struct wiphy *wiphy, struct net_device *netdev, 729static int iwm_cfg80211_set_pmksa(struct wiphy *wiphy,
730 struct cfg80211_pmksa *pmksa) 730 struct net_device *netdev,
731 struct cfg80211_pmksa *pmksa)
731{ 732{
732 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 733 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
733 734
734 return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD); 735 return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_ADD);
735} 736}
736 737
737int iwm_cfg80211_del_pmksa(struct wiphy *wiphy, struct net_device *netdev, 738static int iwm_cfg80211_del_pmksa(struct wiphy *wiphy,
738 struct cfg80211_pmksa *pmksa) 739 struct net_device *netdev,
740 struct cfg80211_pmksa *pmksa)
739{ 741{
740 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 742 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
741 743
742 return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL); 744 return iwm_send_pmkid_update(iwm, pmksa, IWM_CMD_PMKID_DEL);
743} 745}
744 746
745int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) 747static int iwm_cfg80211_flush_pmksa(struct wiphy *wiphy,
748 struct net_device *netdev)
746{ 749{
747 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 750 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
748 struct cfg80211_pmksa pmksa; 751 struct cfg80211_pmksa pmksa;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 42df7262f9f7..330c7d9cf101 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -507,7 +507,7 @@ static int iwm_target_read(struct iwm_priv *iwm, __le32 address,
507 return ret; 507 return ret;
508 } 508 }
509 509
510 /* When succeding, the send_target routine returns the seq number */ 510 /* When succeeding, the send_target routine returns the seq number */
511 seq_num = ret; 511 seq_num = ret;
512 512
513 ret = wait_event_interruptible_timeout(iwm->nonwifi_queue, 513 ret = wait_event_interruptible_timeout(iwm->nonwifi_queue,
@@ -782,10 +782,9 @@ int iwm_send_mlme_profile(struct iwm_priv *iwm)
782 return 0; 782 return 0;
783} 783}
784 784
785int iwm_invalidate_mlme_profile(struct iwm_priv *iwm) 785int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
786{ 786{
787 struct iwm_umac_invalidate_profile invalid; 787 struct iwm_umac_invalidate_profile invalid;
788 int ret;
789 788
790 invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE; 789 invalid.hdr.oid = UMAC_WIFI_IF_CMD_INVALIDATE_PROFILE;
791 invalid.hdr.buf_size = 790 invalid.hdr.buf_size =
@@ -794,7 +793,14 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
794 793
795 invalid.reason = WLAN_REASON_UNSPECIFIED; 794 invalid.reason = WLAN_REASON_UNSPECIFIED;
796 795
797 ret = iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1); 796 return iwm_send_wifi_if_cmd(iwm, &invalid, sizeof(invalid), 1);
797}
798
799int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
800{
801 int ret;
802
803 ret = __iwm_invalidate_mlme_profile(iwm);
798 if (ret) 804 if (ret)
799 return ret; 805 return ret;
800 806
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index 3dfd9f0e9003..7e16bcf59978 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -488,6 +488,7 @@ int iwm_umac_set_config_var(struct iwm_priv *iwm, u16 key,
488 void *payload, u16 payload_size); 488 void *payload, u16 payload_size);
489int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags); 489int iwm_send_umac_config(struct iwm_priv *iwm, __le32 reset_flags);
490int iwm_send_mlme_profile(struct iwm_priv *iwm); 490int iwm_send_mlme_profile(struct iwm_priv *iwm);
491int __iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
491int iwm_invalidate_mlme_profile(struct iwm_priv *iwm); 492int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
492int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id); 493int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
493int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx); 494int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index cbb81befdb55..724441368a18 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -266,7 +266,7 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
266 size_t count, loff_t *ppos) 266 size_t count, loff_t *ppos)
267{ 267{
268 struct iwm_priv *iwm = filp->private_data; 268 struct iwm_priv *iwm = filp->private_data;
269 struct iwm_rx_ticket_node *ticket, *next; 269 struct iwm_rx_ticket_node *ticket;
270 char *buf; 270 char *buf;
271 int buf_len = 4096, i; 271 int buf_len = 4096, i;
272 size_t len = 0; 272 size_t len = 0;
@@ -281,7 +281,8 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
281 if (!buf) 281 if (!buf)
282 return -ENOMEM; 282 return -ENOMEM;
283 283
284 list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) { 284 spin_lock(&iwm->ticket_lock);
285 list_for_each_entry(ticket, &iwm->rx_tickets, node) {
285 len += snprintf(buf + len, buf_len - len, "Ticket #%d\n", 286 len += snprintf(buf + len, buf_len - len, "Ticket #%d\n",
286 ticket->ticket->id); 287 ticket->ticket->id);
287 len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n", 288 len += snprintf(buf + len, buf_len - len, "\taction: 0x%x\n",
@@ -289,14 +290,17 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
289 len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n", 290 len += snprintf(buf + len, buf_len - len, "\tflags: 0x%x\n",
290 ticket->ticket->flags); 291 ticket->ticket->flags);
291 } 292 }
293 spin_unlock(&iwm->ticket_lock);
292 294
293 for (i = 0; i < IWM_RX_ID_HASH; i++) { 295 for (i = 0; i < IWM_RX_ID_HASH; i++) {
294 struct iwm_rx_packet *packet, *nxt; 296 struct iwm_rx_packet *packet;
295 struct list_head *pkt_list = &iwm->rx_packets[i]; 297 struct list_head *pkt_list = &iwm->rx_packets[i];
298
296 if (!list_empty(pkt_list)) { 299 if (!list_empty(pkt_list)) {
297 len += snprintf(buf + len, buf_len - len, 300 len += snprintf(buf + len, buf_len - len,
298 "Packet hash #%d\n", i); 301 "Packet hash #%d\n", i);
299 list_for_each_entry_safe(packet, nxt, pkt_list, node) { 302 spin_lock(&iwm->packet_lock[i]);
303 list_for_each_entry(packet, pkt_list, node) {
300 len += snprintf(buf + len, buf_len - len, 304 len += snprintf(buf + len, buf_len - len,
301 "\tPacket id: %d\n", 305 "\tPacket id: %d\n",
302 packet->id); 306 packet->id);
@@ -304,6 +308,7 @@ static ssize_t iwm_debugfs_rx_ticket_read(struct file *filp,
304 "\tPacket length: %lu\n", 308 "\tPacket length: %lu\n",
305 packet->pkt_size); 309 packet->pkt_size);
306 } 310 }
311 spin_unlock(&iwm->packet_lock[i]);
307 } 312 }
308 } 313 }
309 314
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.c b/drivers/net/wireless/iwmc3200wifi/hal.c
index 229de990379c..9531b18cf72a 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.c
+++ b/drivers/net/wireless/iwmc3200wifi/hal.c
@@ -105,6 +105,7 @@
105#include "hal.h" 105#include "hal.h"
106#include "umac.h" 106#include "umac.h"
107#include "debug.h" 107#include "debug.h"
108#include "trace.h"
108 109
109static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm, 110static int iwm_nonwifi_cmd_init(struct iwm_priv *iwm,
110 struct iwm_nonwifi_cmd *cmd, 111 struct iwm_nonwifi_cmd *cmd,
@@ -207,9 +208,9 @@ void iwm_cmd_flush(struct iwm_priv *iwm)
207 208
208struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num) 209struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
209{ 210{
210 struct iwm_wifi_cmd *cmd, *next; 211 struct iwm_wifi_cmd *cmd;
211 212
212 list_for_each_entry_safe(cmd, next, &iwm->wifi_pending_cmd, pending) 213 list_for_each_entry(cmd, &iwm->wifi_pending_cmd, pending)
213 if (cmd->seq_num == seq_num) { 214 if (cmd->seq_num == seq_num) {
214 list_del(&cmd->pending); 215 list_del(&cmd->pending);
215 return cmd; 216 return cmd;
@@ -218,12 +219,12 @@ struct iwm_wifi_cmd *iwm_get_pending_wifi_cmd(struct iwm_priv *iwm, u16 seq_num)
218 return NULL; 219 return NULL;
219} 220}
220 221
221struct iwm_nonwifi_cmd * 222struct iwm_nonwifi_cmd *iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm,
222iwm_get_pending_nonwifi_cmd(struct iwm_priv *iwm, u8 seq_num, u8 cmd_opcode) 223 u8 seq_num, u8 cmd_opcode)
223{ 224{
224 struct iwm_nonwifi_cmd *cmd, *next; 225 struct iwm_nonwifi_cmd *cmd;
225 226
226 list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending) 227 list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
227 if ((cmd->seq_num == seq_num) && 228 if ((cmd->seq_num == seq_num) &&
228 (cmd->udma_cmd.opcode == cmd_opcode) && 229 (cmd->udma_cmd.opcode == cmd_opcode) &&
229 (cmd->resp_received)) { 230 (cmd->resp_received)) {
@@ -277,6 +278,7 @@ static int iwm_send_udma_nonwifi_cmd(struct iwm_priv *iwm,
277 udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr, 278 udma_cmd->handle_by_hw, cmd->seq_num, udma_cmd->addr,
278 udma_cmd->op1_sz, udma_cmd->op2); 279 udma_cmd->op1_sz, udma_cmd->op2);
279 280
281 trace_iwm_tx_nonwifi_cmd(iwm, udma_hdr);
280 return iwm_bus_send_chunk(iwm, buf->start, buf->len); 282 return iwm_bus_send_chunk(iwm, buf->start, buf->len);
281} 283}
282 284
@@ -363,6 +365,7 @@ static int iwm_send_udma_wifi_cmd(struct iwm_priv *iwm,
363 return ret; 365 return ret;
364 } 366 }
365 367
368 trace_iwm_tx_wifi_cmd(iwm, umac_hdr);
366 return iwm_bus_send_chunk(iwm, buf->start, buf->len); 369 return iwm_bus_send_chunk(iwm, buf->start, buf->len);
367} 370}
368 371
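
Several hunks in this series swap list_for_each_entry_safe() for plain list_for_each_entry(). The _safe variant caches the next pointer so the current node may be unlinked or freed mid-walk; when the loop only reads, or deletes an entry and immediately stops iterating (as iwm_get_pending_wifi_cmd() does above), the plain iterator suffices. The distinction in a minimal singly-linked sketch:

#include <stdio.h>
#include <stdlib.h>

struct node { int v; struct node *next; };

/* Freeing during traversal: grab the next pointer *before* free(),
 * which is exactly what the _safe iterator does for you. */
static void free_all(struct node *head)
{
	struct node *n, *next;
	for (n = head; n; n = next) {
		next = n->next;
		free(n);
	}
}

int main(void)
{
	struct node *b = malloc(sizeof(*b));
	struct node *a = malloc(sizeof(*a));
	a->v = 1; a->next = b;
	b->v = 2; b->next = NULL;
	for (struct node *n = a; n; n = n->next)	/* read-only: plain walk */
		printf("%d\n", n->v);
	free_all(a);
	return 0;
}
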
diff --git a/drivers/net/wireless/iwmc3200wifi/hal.h b/drivers/net/wireless/iwmc3200wifi/hal.h
index 0adfdc85765d..c20936d9b6b7 100644
--- a/drivers/net/wireless/iwmc3200wifi/hal.h
+++ b/drivers/net/wireless/iwmc3200wifi/hal.h
@@ -75,7 +75,8 @@ do { \
75 75
76 76
77/* UDMA IN OP CODE -- cmd bits [3:0] */ 77/* UDMA IN OP CODE -- cmd bits [3:0] */
78#define UDMA_IN_OPCODE_MASK 0xF 78#define UDMA_HDI_IN_NW_CMD_OPCODE_POS 0
79#define UDMA_HDI_IN_NW_CMD_OPCODE_SEED 0xF
79 80
80#define UDMA_IN_OPCODE_GENERAL_RESP 0x0 81#define UDMA_IN_OPCODE_GENERAL_RESP 0x0
81#define UDMA_IN_OPCODE_READ_RESP 0x1 82#define UDMA_IN_OPCODE_READ_RESP 0x1
@@ -130,7 +131,7 @@ do { \
130#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \ 131#define IWM_MAX_WIFI_CMD_BUFF_SIZE (IWM_SDIO_FW_MAX_CHUNK_SIZE - \
131 IWM_MAX_WIFI_HEADERS_SIZE) 132 IWM_MAX_WIFI_HEADERS_SIZE)
132 133
133#define IWM_HAL_CONCATENATE_BUF_SIZE 8192 134#define IWM_HAL_CONCATENATE_BUF_SIZE (32 * 1024)
134 135
135struct iwm_wifi_cmd_buff { 136struct iwm_wifi_cmd_buff {
136 u16 len; 137 u16 len;
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 79ffa3b98d73..13266c3842f8 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -48,6 +48,7 @@
48#include "umac.h" 48#include "umac.h"
49#include "lmac.h" 49#include "lmac.h"
50#include "eeprom.h" 50#include "eeprom.h"
51#include "trace.h"
51 52
52#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation" 53#define IWM_COPYRIGHT "Copyright(c) 2009 Intel Corporation"
53#define IWM_AUTHOR "<ilw@linux.intel.com>" 54#define IWM_AUTHOR "<ilw@linux.intel.com>"
@@ -268,7 +269,9 @@ struct iwm_priv {
268 269
269 struct sk_buff_head rx_list; 270 struct sk_buff_head rx_list;
270 struct list_head rx_tickets; 271 struct list_head rx_tickets;
272 spinlock_t ticket_lock;
271 struct list_head rx_packets[IWM_RX_ID_HASH]; 273 struct list_head rx_packets[IWM_RX_ID_HASH];
274 spinlock_t packet_lock[IWM_RX_ID_HASH];
272 struct workqueue_struct *rx_wq; 275 struct workqueue_struct *rx_wq;
273 struct work_struct rx_worker; 276 struct work_struct rx_worker;
274 277
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index 23856d359e12..362002735b12 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -277,8 +277,11 @@ int iwm_priv_init(struct iwm_priv *iwm)
277 277
278 skb_queue_head_init(&iwm->rx_list); 278 skb_queue_head_init(&iwm->rx_list);
279 INIT_LIST_HEAD(&iwm->rx_tickets); 279 INIT_LIST_HEAD(&iwm->rx_tickets);
280 for (i = 0; i < IWM_RX_ID_HASH; i++) 280 spin_lock_init(&iwm->ticket_lock);
281 for (i = 0; i < IWM_RX_ID_HASH; i++) {
281 INIT_LIST_HEAD(&iwm->rx_packets[i]); 282 INIT_LIST_HEAD(&iwm->rx_packets[i]);
283 spin_lock_init(&iwm->packet_lock[i]);
284 }
282 285
283 INIT_WORK(&iwm->rx_worker, iwm_rx_worker); 286 INIT_WORK(&iwm->rx_worker, iwm_rx_worker);
284 287
@@ -424,9 +427,9 @@ int iwm_notif_send(struct iwm_priv *iwm, struct iwm_wifi_cmd *cmd,
424static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd, 427static struct iwm_notif *iwm_notif_find(struct iwm_priv *iwm, u32 cmd,
425 u8 source) 428 u8 source)
426{ 429{
427 struct iwm_notif *notif, *next; 430 struct iwm_notif *notif;
428 431
429 list_for_each_entry_safe(notif, next, &iwm->pending_notif, pending) { 432 list_for_each_entry(notif, &iwm->pending_notif, pending) {
430 if ((notif->cmd_id == cmd) && (notif->src == source)) { 433 if ((notif->cmd_id == cmd) && (notif->src == source)) {
431 list_del(&notif->pending); 434 list_del(&notif->pending);
432 return notif; 435 return notif;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 3257d4fad835..ad5398779240 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -343,15 +343,17 @@ static void iwm_rx_ticket_node_free(struct iwm_rx_ticket_node *ticket_node)
343static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id) 343static struct iwm_rx_packet *iwm_rx_packet_get(struct iwm_priv *iwm, u16 id)
344{ 344{
345 u8 id_hash = IWM_RX_ID_GET_HASH(id); 345 u8 id_hash = IWM_RX_ID_GET_HASH(id);
346 struct list_head *packet_list; 346 struct iwm_rx_packet *packet;
347 struct iwm_rx_packet *packet, *next;
348
349 packet_list = &iwm->rx_packets[id_hash];
350 347
351 list_for_each_entry_safe(packet, next, packet_list, node) 348 spin_lock(&iwm->packet_lock[id_hash]);
352 if (packet->id == id) 349 list_for_each_entry(packet, &iwm->rx_packets[id_hash], node)
350 if (packet->id == id) {
351 list_del(&packet->node);
352 spin_unlock(&iwm->packet_lock[id_hash]);
353 return packet; 353 return packet;
354 }
354 355
356 spin_unlock(&iwm->packet_lock[id_hash]);
355 return NULL; 357 return NULL;
356} 358}
357 359
@@ -389,18 +391,22 @@ void iwm_rx_free(struct iwm_priv *iwm)
389 struct iwm_rx_packet *packet, *np; 391 struct iwm_rx_packet *packet, *np;
390 int i; 392 int i;
391 393
394 spin_lock(&iwm->ticket_lock);
392 list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) { 395 list_for_each_entry_safe(ticket, nt, &iwm->rx_tickets, node) {
393 list_del(&ticket->node); 396 list_del(&ticket->node);
394 iwm_rx_ticket_node_free(ticket); 397 iwm_rx_ticket_node_free(ticket);
395 } 398 }
399 spin_unlock(&iwm->ticket_lock);
396 400
397 for (i = 0; i < IWM_RX_ID_HASH; i++) { 401 for (i = 0; i < IWM_RX_ID_HASH; i++) {
402 spin_lock(&iwm->packet_lock[i]);
398 list_for_each_entry_safe(packet, np, &iwm->rx_packets[i], 403 list_for_each_entry_safe(packet, np, &iwm->rx_packets[i],
399 node) { 404 node) {
400 list_del(&packet->node); 405 list_del(&packet->node);
401 kfree_skb(packet->skb); 406 kfree_skb(packet->skb);
402 kfree(packet); 407 kfree(packet);
403 } 408 }
409 spin_unlock(&iwm->packet_lock[i]);
404 } 410 }
405} 411}
406 412
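
The rx.c changes give the ticket list and each packet hash bucket its own spinlock, so the rx worker and the notification path can operate on different buckets concurrently. A pthread sketch of per-bucket locking (bucket count and hash are invented; the kernel code uses spin_lock(), not mutexes):

#include <pthread.h>
#include <stdio.h>

#define NBUCKETS 8	/* stand-in for IWM_RX_ID_HASH */

struct bucket {
	pthread_mutex_t lock;	/* stand-in for iwm->packet_lock[i] */
	int count;		/* stand-in for the per-bucket packet list */
};

static struct bucket buckets[NBUCKETS];

static void add_packet(int id)
{
	struct bucket *b = &buckets[id % NBUCKETS];
	pthread_mutex_lock(&b->lock);	/* only this bucket is serialized */
	b->count++;
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	for (int i = 0; i < NBUCKETS; i++)
		pthread_mutex_init(&buckets[i].lock, NULL);
	add_packet(42);
	printf("%d\n", buckets[42 % NBUCKETS].count);	/* 1 */
	return 0;
}
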
@@ -428,7 +434,9 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
428 ticket->action == IWM_RX_TICKET_RELEASE ? 434 ticket->action == IWM_RX_TICKET_RELEASE ?
429 "RELEASE" : "DROP", 435 "RELEASE" : "DROP",
430 ticket->id); 436 ticket->id);
437 spin_lock(&iwm->ticket_lock);
431 list_add_tail(&ticket_node->node, &iwm->rx_tickets); 438 list_add_tail(&ticket_node->node, &iwm->rx_tickets);
439 spin_unlock(&iwm->ticket_lock);
432 440
433 /* 441 /*
434 * We received an Rx ticket, most likely there's 442 * We received an Rx ticket, most likely there's
@@ -461,6 +469,7 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
461 struct iwm_rx_packet *packet; 469 struct iwm_rx_packet *packet;
462 u16 id, buf_offset; 470 u16 id, buf_offset;
463 u32 packet_size; 471 u32 packet_size;
472 u8 id_hash;
464 473
465 IWM_DBG_RX(iwm, DBG, "\n"); 474 IWM_DBG_RX(iwm, DBG, "\n");
466 475
@@ -478,7 +487,10 @@ static int iwm_ntf_rx_packet(struct iwm_priv *iwm, u8 *buf,
478 if (IS_ERR(packet)) 487 if (IS_ERR(packet))
479 return PTR_ERR(packet); 488 return PTR_ERR(packet);
480 489
481 list_add_tail(&packet->node, &iwm->rx_packets[IWM_RX_ID_GET_HASH(id)]); 490 id_hash = IWM_RX_ID_GET_HASH(id);
491 spin_lock(&iwm->packet_lock[id_hash]);
492 list_add_tail(&packet->node, &iwm->rx_packets[id_hash]);
493 spin_unlock(&iwm->packet_lock[id_hash]);
482 494
483 /* We might (unlikely) have received the packet _after_ the ticket */ 495 /* We might (unlikely) have received the packet _after_ the ticket */
484 queue_work(iwm->rx_wq, &iwm->rx_worker); 496 queue_work(iwm->rx_wq, &iwm->rx_worker);
@@ -519,6 +531,8 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
519 unsigned long buf_size, 531 unsigned long buf_size,
520 struct iwm_wifi_cmd *cmd) 532 struct iwm_wifi_cmd *cmd)
521{ 533{
534 struct wiphy *wiphy = iwm_to_wiphy(iwm);
535 struct ieee80211_channel *chan;
522 struct iwm_umac_notif_assoc_complete *complete = 536 struct iwm_umac_notif_assoc_complete *complete =
523 (struct iwm_umac_notif_assoc_complete *)buf; 537 (struct iwm_umac_notif_assoc_complete *)buf;
524 538
@@ -527,6 +541,18 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
527 541
528 switch (le32_to_cpu(complete->status)) { 542 switch (le32_to_cpu(complete->status)) {
529 case UMAC_ASSOC_COMPLETE_SUCCESS: 543 case UMAC_ASSOC_COMPLETE_SUCCESS:
544 chan = ieee80211_get_channel(wiphy,
545 ieee80211_channel_to_frequency(complete->channel));
546 if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
 547 /* Associated on a disallowed channel; disassociate. */
548 __iwm_invalidate_mlme_profile(iwm);
 549 IWM_WARN(iwm, "Couldn't associate with %pM: channel %d "
 550 "is disabled. Check your local regulatory "
 551 "setting.\n",
552 complete->bssid, complete->channel);
553 goto failure;
554 }
555
530 set_bit(IWM_STATUS_ASSOCIATED, &iwm->status); 556 set_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
531 memcpy(iwm->bssid, complete->bssid, ETH_ALEN); 557 memcpy(iwm->bssid, complete->bssid, ETH_ALEN);
532 iwm->channel = complete->channel; 558 iwm->channel = complete->channel;
@@ -563,6 +589,7 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
563 GFP_KERNEL); 589 GFP_KERNEL);
564 break; 590 break;
565 case UMAC_ASSOC_COMPLETE_FAILURE: 591 case UMAC_ASSOC_COMPLETE_FAILURE:
592 failure:
566 clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status); 593 clear_bit(IWM_STATUS_ASSOCIATED, &iwm->status);
567 memset(iwm->bssid, 0, ETH_ALEN); 594 memset(iwm->bssid, 0, ETH_ALEN);
568 iwm->channel = 0; 595 iwm->channel = 0;
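The success path above now re-checks the channel against the local regulatory domain and tears the association down if the channel is disabled. A hedged sketch of the check, assuming the single-argument ieee80211_channel_to_frequency() of this kernel generation:

    #include <net/cfg80211.h>

    /* Return true if the regulatory domain allows this channel number. */
    static bool assoc_channel_allowed(struct wiphy *wiphy, int channel_num)
    {
    	struct ieee80211_channel *chan;

    	chan = ieee80211_get_channel(wiphy,
    			ieee80211_channel_to_frequency(channel_num));
    	return chan && !(chan->flags & IEEE80211_CHAN_DISABLED);
    }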
@@ -757,7 +784,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
757 (struct iwm_umac_notif_bss_info *)buf; 784 (struct iwm_umac_notif_bss_info *)buf;
758 struct ieee80211_channel *channel; 785 struct ieee80211_channel *channel;
759 struct ieee80211_supported_band *band; 786 struct ieee80211_supported_band *band;
760 struct iwm_bss_info *bss, *next; 787 struct iwm_bss_info *bss;
761 s32 signal; 788 s32 signal;
762 int freq; 789 int freq;
763 u16 frame_len = le16_to_cpu(umac_bss->frame_len); 790 u16 frame_len = le16_to_cpu(umac_bss->frame_len);
@@ -776,7 +803,7 @@ static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
776 IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi); 803 IWM_DBG_MLME(iwm, DBG, "\tRSSI: %d\n", umac_bss->rssi);
777 IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len); 804 IWM_DBG_MLME(iwm, DBG, "\tFrame Length: %d\n", frame_len);
778 805
779 list_for_each_entry_safe(bss, next, &iwm->bss_list, node) 806 list_for_each_entry(bss, &iwm->bss_list, node)
780 if (bss->bss->table_idx == umac_bss->table_idx) 807 if (bss->bss->table_idx == umac_bss->table_idx)
781 break; 808 break;
782 809
@@ -843,16 +870,15 @@ static int iwm_mlme_remove_bss(struct iwm_priv *iwm, u8 *buf,
843 int i; 870 int i;
844 871
845 for (i = 0; i < le32_to_cpu(bss_rm->count); i++) { 872 for (i = 0; i < le32_to_cpu(bss_rm->count); i++) {
846 table_idx = (le16_to_cpu(bss_rm->entries[i]) 873 table_idx = le16_to_cpu(bss_rm->entries[i]) &
847 & IWM_BSS_REMOVE_INDEX_MSK); 874 IWM_BSS_REMOVE_INDEX_MSK;
848 list_for_each_entry_safe(bss, next, &iwm->bss_list, node) 875 list_for_each_entry_safe(bss, next, &iwm->bss_list, node)
849 if (bss->bss->table_idx == cpu_to_le16(table_idx)) { 876 if (bss->bss->table_idx == cpu_to_le16(table_idx)) {
850 struct ieee80211_mgmt *mgmt; 877 struct ieee80211_mgmt *mgmt;
851 878
852 mgmt = (struct ieee80211_mgmt *) 879 mgmt = (struct ieee80211_mgmt *)
853 (bss->bss->frame_buf); 880 (bss->bss->frame_buf);
854 IWM_DBG_MLME(iwm, ERR, 881 IWM_DBG_MLME(iwm, ERR, "BSS removed: %pM\n",
855 "BSS removed: %pM\n",
856 mgmt->bssid); 882 mgmt->bssid);
857 list_del(&bss->node); 883 list_del(&bss->node);
858 kfree(bss->bss); 884 kfree(bss->bss);
@@ -1224,18 +1250,24 @@ static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
1224 u8 source, cmd_id; 1250 u8 source, cmd_id;
1225 u16 seq_num; 1251 u16 seq_num;
1226 u32 count; 1252 u32 count;
1227 u8 resp;
1228 1253
1229 wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf; 1254 wifi_hdr = (struct iwm_umac_wifi_in_hdr *)buf;
1230 cmd_id = wifi_hdr->sw_hdr.cmd.cmd; 1255 cmd_id = wifi_hdr->sw_hdr.cmd.cmd;
1231
1232 source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE); 1256 source = GET_VAL32(wifi_hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
1233 if (source >= IWM_SRC_NUM) { 1257 if (source >= IWM_SRC_NUM) {
1234 IWM_CRIT(iwm, "invalid source %d\n", source); 1258 IWM_CRIT(iwm, "invalid source %d\n", source);
1235 return -EINVAL; 1259 return -EINVAL;
1236 } 1260 }
1237 1261
1238 count = (GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT)); 1262 if (cmd_id == REPLY_RX_MPDU_CMD)
1263 trace_iwm_rx_packet(iwm, buf, buf_size);
1264 else if ((cmd_id == UMAC_NOTIFY_OPCODE_RX_TICKET) &&
1265 (source == UMAC_HDI_IN_SOURCE_FW))
1266 trace_iwm_rx_ticket(iwm, buf, buf_size);
1267 else
1268 trace_iwm_rx_wifi_cmd(iwm, wifi_hdr);
1269
1270 count = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
1239 count += sizeof(struct iwm_umac_wifi_in_hdr) - 1271 count += sizeof(struct iwm_umac_wifi_in_hdr) -
1240 sizeof(struct iwm_dev_cmd_hdr); 1272 sizeof(struct iwm_dev_cmd_hdr);
1241 if (count > buf_size) { 1273 if (count > buf_size) {
@@ -1243,8 +1275,6 @@ static int iwm_rx_handle_wifi(struct iwm_priv *iwm, u8 *buf,
1243 return -EINVAL; 1275 return -EINVAL;
1244 } 1276 }
1245 1277
1246 resp = GET_VAL32(wifi_hdr->sw_hdr.meta_data, UMAC_FW_CMD_STATUS);
1247
1248 seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num); 1278 seq_num = le16_to_cpu(wifi_hdr->sw_hdr.cmd.seq_num);
1249 1279
1250 IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n", 1280 IWM_DBG_RX(iwm, DBG, "CMD:0x%x, source: 0x%x, seqnum: %d\n",
@@ -1317,8 +1347,9 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
1317{ 1347{
1318 u8 seq_num; 1348 u8 seq_num;
1319 struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf; 1349 struct iwm_udma_in_hdr *hdr = (struct iwm_udma_in_hdr *)buf;
1320 struct iwm_nonwifi_cmd *cmd, *next; 1350 struct iwm_nonwifi_cmd *cmd;
1321 1351
1352 trace_iwm_rx_nonwifi_cmd(iwm, buf, buf_size);
1322 seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM); 1353 seq_num = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
1323 1354
1324 /* 1355 /*
@@ -1329,7 +1360,7 @@ static int iwm_rx_handle_nonwifi(struct iwm_priv *iwm, u8 *buf,
1329 * That means we only support synchronised non wifi command response 1360 * That means we only support synchronised non wifi command response
1330 * schemes. 1361 * schemes.
1331 */ 1362 */
1332 list_for_each_entry_safe(cmd, next, &iwm->nonwifi_pending_cmd, pending) 1363 list_for_each_entry(cmd, &iwm->nonwifi_pending_cmd, pending)
1333 if (cmd->seq_num == seq_num) { 1364 if (cmd->seq_num == seq_num) {
1334 cmd->resp_received = 1; 1365 cmd->resp_received = 1;
1335 cmd->buf.len = buf_size; 1366 cmd->buf.len = buf_size;
@@ -1648,6 +1679,7 @@ void iwm_rx_worker(struct work_struct *work)
1648 * We stop whenever a ticket is missing its packet, as we're 1679 * We stop whenever a ticket is missing its packet, as we're
1649 * supposed to send the packets in order. 1680 * supposed to send the packets in order.
1650 */ 1681 */
1682 spin_lock(&iwm->ticket_lock);
1651 list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) { 1683 list_for_each_entry_safe(ticket, next, &iwm->rx_tickets, node) {
1652 struct iwm_rx_packet *packet = 1684 struct iwm_rx_packet *packet =
1653 iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id)); 1685 iwm_rx_packet_get(iwm, le16_to_cpu(ticket->ticket->id));
@@ -1656,12 +1688,12 @@ void iwm_rx_worker(struct work_struct *work)
1656 IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d " 1688 IWM_DBG_RX(iwm, DBG, "Skip rx_work: Wait for ticket %d "
1657 "to be handled first\n", 1689 "to be handled first\n",
1658 le16_to_cpu(ticket->ticket->id)); 1690 le16_to_cpu(ticket->ticket->id));
1659 return; 1691 break;
1660 } 1692 }
1661 1693
1662 list_del(&ticket->node); 1694 list_del(&ticket->node);
1663 list_del(&packet->node);
1664 iwm_rx_process_packet(iwm, packet, ticket); 1695 iwm_rx_process_packet(iwm, packet, ticket);
1665 } 1696 }
1697 spin_unlock(&iwm->ticket_lock);
1666} 1698}
1667 1699
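The worker now breaks out of the loop instead of returning, so the new ticket_lock is released on every path, and it no longer unlinks the packet itself (iwm_rx_process_packet() owns that, under the per-bucket locks above). A sketch of the ordered drain, with hypothetical helpers standing in for iwm_rx_packet_get() and iwm_rx_process_packet():

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct rx_packet;
    struct rx_ticket {
    	struct list_head node;
    	u16 id;
    };

    struct rx_packet *lookup_packet(u16 id);			 /* hypothetical */
    void process_packet(struct rx_packet *pkt, struct rx_ticket *t); /* hypothetical */

    /* Drain tickets in order; stop at the first ticket whose packet has
     * not arrived yet, since packets must be released in ticket order. */
    static void drain_tickets(spinlock_t *lock, struct list_head *tickets)
    {
    	struct rx_ticket *t, *next;

    	spin_lock(lock);
    	list_for_each_entry_safe(t, next, tickets, node) {
    		struct rx_packet *pkt = lookup_packet(t->id);

    		if (!pkt)
    			break;	/* wait, but never return with the lock held */

    		list_del(&t->node);
    		process_packet(pkt, t);	/* also unlinks pkt from its bucket */
    	}
    	spin_unlock(lock);
    }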
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.c b/drivers/net/wireless/iwmc3200wifi/trace.c
new file mode 100644
index 000000000000..904d36f22311
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/trace.c
@@ -0,0 +1,3 @@
1#include "iwm.h"
2#define CREATE_TRACE_POINTS
3#include "trace.h"
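trace.c is the one translation unit that turns the event declarations in trace.h into definitions; CREATE_TRACE_POINTS must be defined in exactly one .c file per subsystem or the tracepoints end up multiply defined. Every other file simply includes the header and calls the generated trace_*() stubs, roughly:

    /* Illustrative caller: include trace.h without CREATE_TRACE_POINTS. */
    #include "iwm.h"
    #include "trace.h"

    static void handle_ticket(struct iwm_priv *iwm, u8 *buf, int len)
    {
    	trace_iwm_rx_ticket(iwm, buf, len); /* no-op when CONFIG_IWM_TRACING=n */
    }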
diff --git a/drivers/net/wireless/iwmc3200wifi/trace.h b/drivers/net/wireless/iwmc3200wifi/trace.h
new file mode 100644
index 000000000000..320e54fbb38c
--- /dev/null
+++ b/drivers/net/wireless/iwmc3200wifi/trace.h
@@ -0,0 +1,283 @@
1#if !defined(__IWM_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
2#define __IWM_TRACE_H__
3
4#include <linux/tracepoint.h>
5
6#if !defined(CONFIG_IWM_TRACING)
7#undef TRACE_EVENT
8#define TRACE_EVENT(name, proto, ...) \
9static inline void trace_ ## name(proto) {}
10#endif
11
12#undef TRACE_SYSTEM
13#define TRACE_SYSTEM iwm
14
15#define IWM_ENTRY __array(char, ndev_name, 16)
16#define IWM_ASSIGN strlcpy(__entry->ndev_name, iwm_to_ndev(iwm)->name, 16)
17#define IWM_PR_FMT "%s"
18#define IWM_PR_ARG __entry->ndev_name
19
20TRACE_EVENT(iwm_tx_nonwifi_cmd,
21 TP_PROTO(struct iwm_priv *iwm, struct iwm_udma_out_nonwifi_hdr *hdr),
22
23 TP_ARGS(iwm, hdr),
24
25 TP_STRUCT__entry(
26 IWM_ENTRY
27 __field(u8, opcode)
28 __field(u8, resp)
29 __field(u8, eot)
30 __field(u8, hw)
31 __field(u16, seq)
32 __field(u32, addr)
33 __field(u32, op1)
34 __field(u32, op2)
35 ),
36
37 TP_fast_assign(
38 IWM_ASSIGN;
39 __entry->opcode = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_OPCODE);
40 __entry->resp = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_RESP);
41 __entry->eot = GET_VAL32(hdr->cmd, UMAC_HDI_OUT_CMD_EOT);
42 __entry->hw = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_NW_CMD_HANDLE_BY_HW);
43 __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_OUT_CMD_NON_WIFI_HW_SEQ_NUM);
44 __entry->addr = le32_to_cpu(hdr->addr);
45 __entry->op1 = le32_to_cpu(hdr->op1_sz);
46 __entry->op2 = le32_to_cpu(hdr->op2);
47 ),
48
49 TP_printk(
50 IWM_PR_FMT " Tx TARGET CMD: opcode 0x%x, resp %d, eot %d, "
51 "hw %d, seq 0x%x, addr 0x%x, op1 0x%x, op2 0x%x",
52 IWM_PR_ARG, __entry->opcode, __entry->resp, __entry->eot,
53 __entry->hw, __entry->seq, __entry->addr, __entry->op1,
54 __entry->op2
55 )
56);
57
58TRACE_EVENT(iwm_tx_wifi_cmd,
59 TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_out_hdr *hdr),
60
61 TP_ARGS(iwm, hdr),
62
63 TP_STRUCT__entry(
64 IWM_ENTRY
65 __field(u8, opcode)
66 __field(u8, lmac)
67 __field(u8, resp)
68 __field(u8, eot)
69 __field(u8, ra_tid)
70 __field(u8, credit_group)
71 __field(u8, color)
72 __field(u16, seq)
73 ),
74
75 TP_fast_assign(
76 IWM_ASSIGN;
77 __entry->opcode = hdr->sw_hdr.cmd.cmd;
78 __entry->lmac = 0;
79 __entry->seq = hdr->sw_hdr.cmd.seq_num;
80 __entry->resp = GET_VAL8(hdr->sw_hdr.cmd.flags, UMAC_DEV_CMD_FLAGS_RESP_REQ);
81 __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
82 __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
83 __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
84 __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
85 if (__entry->opcode == UMAC_CMD_OPCODE_WIFI_PASS_THROUGH ||
86 __entry->opcode == UMAC_CMD_OPCODE_WIFI_IF_WRAPPER) {
87 __entry->lmac = 1;
88 __entry->opcode = ((struct iwm_lmac_hdr *)(hdr + 1))->id;
89 }
90 ),
91
92 TP_printk(
93 IWM_PR_FMT " Tx %cMAC CMD: opcode 0x%x, resp %d, eot %d, "
94 "seq 0x%x, sta_color 0x%x, ra_tid 0x%x, credit_group 0x%x",
95 IWM_PR_ARG, __entry->lmac ? 'L' : 'U', __entry->opcode,
96 __entry->resp, __entry->eot, __entry->seq, __entry->color,
97 __entry->ra_tid, __entry->credit_group
98 )
99);
100
101TRACE_EVENT(iwm_tx_packets,
102 TP_PROTO(struct iwm_priv *iwm, u8 *buf, int len),
103
104 TP_ARGS(iwm, buf, len),
105
106 TP_STRUCT__entry(
107 IWM_ENTRY
108 __field(u8, eot)
109 __field(u8, ra_tid)
110 __field(u8, credit_group)
111 __field(u8, color)
112 __field(u16, seq)
113 __field(u8, npkt)
114 __field(u32, bytes)
115 ),
116
117 TP_fast_assign(
118 struct iwm_umac_wifi_out_hdr *hdr =
119 (struct iwm_umac_wifi_out_hdr *)buf;
120
121 IWM_ASSIGN;
122 __entry->eot = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_OUT_CMD_EOT);
123 __entry->ra_tid = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_RATID);
124 __entry->credit_group = GET_VAL32(hdr->hw_hdr.meta_data, UMAC_HDI_OUT_CREDIT_GRP);
125 __entry->color = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_TX_STA_COLOR);
126 __entry->seq = hdr->sw_hdr.cmd.seq_num;
127 __entry->npkt = 1;
128 __entry->bytes = len;
129
130 if (!__entry->eot) {
131 int count;
132 u8 *ptr = buf;
133
134 __entry->npkt = 0;
135 while (ptr < buf + len) {
136 count = GET_VAL32(hdr->sw_hdr.meta_data,
137 UMAC_FW_CMD_BYTE_COUNT);
138 ptr += ALIGN(sizeof(*hdr) + count, 16);
139 hdr = (struct iwm_umac_wifi_out_hdr *)ptr;
140 __entry->npkt++;
141 }
142 }
143 ),
144
145 TP_printk(
146 IWM_PR_FMT " Tx %spacket: eot %d, seq 0x%x, sta_color 0x%x, "
147 "ra_tid 0x%x, credit_group 0x%x, embeded_packets %d, %d bytes",
148 IWM_PR_ARG, !__entry->eot ? "concatenated " : "",
149 __entry->eot, __entry->seq, __entry->color, __entry->ra_tid,
150 __entry->credit_group, __entry->npkt, __entry->bytes
151 )
152);
153
154TRACE_EVENT(iwm_rx_nonwifi_cmd,
155 TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
156
157 TP_ARGS(iwm, buf, len),
158
159 TP_STRUCT__entry(
160 IWM_ENTRY
161 __field(u8, opcode)
162 __field(u16, seq)
163 __field(u32, len)
164 ),
165
166 TP_fast_assign(
167 struct iwm_udma_in_hdr *hdr = buf;
168
169 IWM_ASSIGN;
170 __entry->opcode = GET_VAL32(hdr->cmd, UDMA_HDI_IN_NW_CMD_OPCODE);
171 __entry->seq = GET_VAL32(hdr->cmd, UDMA_HDI_IN_CMD_NON_WIFI_HW_SEQ_NUM);
172 __entry->len = len;
173 ),
174
175 TP_printk(
176 IWM_PR_FMT " Rx TARGET RESP: opcode 0x%x, seq 0x%x, len 0x%x",
177 IWM_PR_ARG, __entry->opcode, __entry->seq, __entry->len
178 )
179);
180
181TRACE_EVENT(iwm_rx_wifi_cmd,
182 TP_PROTO(struct iwm_priv *iwm, struct iwm_umac_wifi_in_hdr *hdr),
183
184 TP_ARGS(iwm, hdr),
185
186 TP_STRUCT__entry(
187 IWM_ENTRY
188 __field(u8, cmd)
189 __field(u8, source)
190 __field(u16, seq)
191 __field(u32, count)
192 ),
193
194 TP_fast_assign(
195 IWM_ASSIGN;
196 __entry->cmd = hdr->sw_hdr.cmd.cmd;
197 __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
198 __entry->count = GET_VAL32(hdr->sw_hdr.meta_data, UMAC_FW_CMD_BYTE_COUNT);
199 __entry->seq = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
200 ),
201
202 TP_printk(
203 IWM_PR_FMT " Rx %s RESP: cmd 0x%x, seq 0x%x, count 0x%x",
204 IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ? "LMAC" :
205 __entry->source == UMAC_HDI_IN_SOURCE_FW ? "UMAC" : "UDMA",
206 __entry->cmd, __entry->seq, __entry->count
207 )
208);
209
210#define iwm_ticket_action_symbol \
211 { IWM_RX_TICKET_DROP, "DROP" }, \
212 { IWM_RX_TICKET_RELEASE, "RELEASE" }, \
213 { IWM_RX_TICKET_SNIFFER, "SNIFFER" }, \
214 { IWM_RX_TICKET_ENQUEUE, "ENQUEUE" }
215
216TRACE_EVENT(iwm_rx_ticket,
217 TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
218
219 TP_ARGS(iwm, buf, len),
220
221 TP_STRUCT__entry(
222 IWM_ENTRY
223 __field(u8, action)
224 __field(u8, reason)
225 __field(u16, id)
226 __field(u16, flags)
227 ),
228
229 TP_fast_assign(
230 struct iwm_rx_ticket *ticket =
231 ((struct iwm_umac_notif_rx_ticket *)buf)->tickets;
232
233 IWM_ASSIGN;
234 __entry->id = le16_to_cpu(ticket->id);
235 __entry->action = le16_to_cpu(ticket->action);
236 __entry->flags = le16_to_cpu(ticket->flags);
237 __entry->reason = (__entry->flags & IWM_RX_TICKET_DROP_REASON_MSK) >> IWM_RX_TICKET_DROP_REASON_POS;
238 ),
239
240 TP_printk(
241 IWM_PR_FMT " Rx ticket: id 0x%x, action %s, %s 0x%x%s",
242 IWM_PR_ARG, __entry->id,
243 __print_symbolic(__entry->action, iwm_ticket_action_symbol),
244 __entry->reason ? "reason" : "flags",
245 __entry->reason ? __entry->reason : __entry->flags,
246 __entry->flags & IWM_RX_TICKET_AMSDU_MSK ? ", AMSDU frame" : ""
247 )
248);
249
250TRACE_EVENT(iwm_rx_packet,
251 TP_PROTO(struct iwm_priv *iwm, void *buf, int len),
252
253 TP_ARGS(iwm, buf, len),
254
255 TP_STRUCT__entry(
256 IWM_ENTRY
257 __field(u8, source)
258 __field(u16, id)
259 __field(u32, len)
260 ),
261
262 TP_fast_assign(
263 struct iwm_umac_wifi_in_hdr *hdr = buf;
264
265 IWM_ASSIGN;
266 __entry->source = GET_VAL32(hdr->hw_hdr.cmd, UMAC_HDI_IN_CMD_SOURCE);
267 __entry->id = le16_to_cpu(hdr->sw_hdr.cmd.seq_num);
268 __entry->len = len - sizeof(*hdr);
269 ),
270
271 TP_printk(
272 IWM_PR_FMT " Rx %s packet: id 0x%x, %d bytes",
273 IWM_PR_ARG, __entry->source == UMAC_HDI_IN_SOURCE_FHRX ?
274 "LMAC" : "UMAC", __entry->id, __entry->len
275 )
276);
277#endif
278
279#undef TRACE_INCLUDE_PATH
280#define TRACE_INCLUDE_PATH .
281#undef TRACE_INCLUDE_FILE
282#define TRACE_INCLUDE_FILE trace
283#include <trace/define_trace.h>
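The header follows the standard self-including tracepoint recipe: the include guard permits a re-read when TRACE_HEADER_MULTI_READ is set, and the TRACE_INCLUDE_* defines sit outside the guard so define_trace.h can pull the file in again to expand the events. A skeletal version for a hypothetical subsystem "foo":

    /* foo_trace.h -- skeletal self-including tracepoint header (illustrative) */
    #if !defined(__FOO_TRACE_H__) || defined(TRACE_HEADER_MULTI_READ)
    #define __FOO_TRACE_H__

    #include <linux/tracepoint.h>

    #undef TRACE_SYSTEM
    #define TRACE_SYSTEM foo

    TRACE_EVENT(foo_event,
    	TP_PROTO(int val),
    	TP_ARGS(val),
    	TP_STRUCT__entry(__field(int, val)),
    	TP_fast_assign(__entry->val = val;),
    	TP_printk("val %d", __entry->val)
    );

    #endif /* __FOO_TRACE_H__ */

    /* Outside the guard: let define_trace.h re-include this file. */
    #undef TRACE_INCLUDE_PATH
    #define TRACE_INCLUDE_PATH .
    #undef TRACE_INCLUDE_FILE
    #define TRACE_INCLUDE_FILE foo_trace
    #include <trace/define_trace.h>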
diff --git a/drivers/net/wireless/iwmc3200wifi/tx.c b/drivers/net/wireless/iwmc3200wifi/tx.c
index f6a02f123f31..9537cdb13d3f 100644
--- a/drivers/net/wireless/iwmc3200wifi/tx.c
+++ b/drivers/net/wireless/iwmc3200wifi/tx.c
@@ -347,6 +347,7 @@ static int iwm_tx_send_concat_packets(struct iwm_priv *iwm,
347 /* mark EOP for the last packet */ 347 /* mark EOP for the last packet */
348 iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1); 348 iwm_udma_wifi_hdr_set_eop(iwm, txq->concat_ptr, 1);
349 349
350 trace_iwm_tx_packets(iwm, txq->concat_buf, txq->concat_count);
350 ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count); 351 ret = iwm_bus_send_chunk(iwm, txq->concat_buf, txq->concat_count);
351 352
352 txq->concat_count = 0; 353 txq->concat_count = 0;
@@ -451,7 +452,6 @@ void iwm_tx_worker(struct work_struct *work)
451int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 452int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
452{ 453{
453 struct iwm_priv *iwm = ndev_to_iwm(netdev); 454 struct iwm_priv *iwm = ndev_to_iwm(netdev);
454 struct net_device *ndev = iwm_to_ndev(iwm);
455 struct wireless_dev *wdev = iwm_to_wdev(iwm); 455 struct wireless_dev *wdev = iwm_to_wdev(iwm);
456 struct iwm_tx_info *tx_info; 456 struct iwm_tx_info *tx_info;
457 struct iwm_tx_queue *txq; 457 struct iwm_tx_queue *txq;
@@ -518,12 +518,12 @@ int iwm_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
518 518
519 queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker); 519 queue_work(iwm->txq[queue].wq, &iwm->txq[queue].worker);
520 520
521 ndev->stats.tx_packets++; 521 netdev->stats.tx_packets++;
522 ndev->stats.tx_bytes += skb->len; 522 netdev->stats.tx_bytes += skb->len;
523 return NETDEV_TX_OK; 523 return NETDEV_TX_OK;
524 524
525 drop: 525 drop:
526 ndev->stats.tx_dropped++; 526 netdev->stats.tx_dropped++;
527 dev_kfree_skb_any(skb); 527 dev_kfree_skb_any(skb);
528 return NETDEV_TX_OK; 528 return NETDEV_TX_OK;
529} 529}
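The tx.c cleanup drops a pointless ndev_to_iwm()/iwm_to_ndev() round trip: ndo_start_xmit already receives the net_device whose counters need updating. The shape, with hypothetical names:

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* Hypothetical ndo_start_xmit: account on the device we were handed. */
    static netdev_tx_t foo_xmit_frame(struct sk_buff *skb,
    				  struct net_device *netdev)
    {
    	netdev->stats.tx_packets++;
    	netdev->stats.tx_bytes += skb->len;
    	dev_kfree_skb_any(skb);	/* a real driver would queue it instead */
    	return NETDEV_TX_OK;
    }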
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index 7f54a145ca65..0cbba3ecc813 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -362,7 +362,7 @@ struct iwm_udma_out_wifi_hdr {
362#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4 362#define IWM_RX_TICKET_SPECIAL_SNAP_MSK 0x4
363#define IWM_RX_TICKET_AMSDU_MSK 0x8 363#define IWM_RX_TICKET_AMSDU_MSK 0x8
364#define IWM_RX_TICKET_DROP_REASON_POS 4 364#define IWM_RX_TICKET_DROP_REASON_POS 4
365#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << RX_TICKET_FLAGS_DROP_REASON_POS) 365#define IWM_RX_TICKET_DROP_REASON_MSK (0x1F << IWM_RX_TICKET_DROP_REASON_POS)
366 366
367#define IWM_RX_DROP_NO_DROP 0x0 367#define IWM_RX_DROP_NO_DROP 0x0
368#define IWM_RX_DROP_BAD_CRC 0x1 368#define IWM_RX_DROP_BAD_CRC 0x1
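The umac.h one-liner fixes a copy-and-paste bug: the drop-reason mask was shifted by a nonexistent RX_TICKET_FLAGS_DROP_REASON_POS instead of its own IWM_RX_TICKET_DROP_REASON_POS, which the reason extraction in trace.h relies on. The pos/mask idiom in general, with hypothetical names:

    #include <linux/types.h>

    #define DROP_REASON_POS	4
    #define DROP_REASON_MSK	(0x1F << DROP_REASON_POS) /* built from its own POS */

    static inline u32 drop_reason(u32 flags)
    {
    	return (flags & DROP_REASON_MSK) >> DROP_REASON_POS;
    }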
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 12a2ef9dacea..aa06070e5eab 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -32,6 +32,9 @@ u8 lbs_bg_rates[MAX_RATES] =
320x00, 0x00 }; 320x00, 0x00 };
33 33
34 34
35static int assoc_helper_wep_keys(struct lbs_private *priv,
36 struct assoc_request *assoc_req);
37
35/** 38/**
36 * @brief This function finds common rates between rates and card rates. 39 * @brief This function finds common rates between rates and card rates.
37 * 40 *
@@ -611,7 +614,7 @@ static int lbs_assoc_post(struct lbs_private *priv,
611 614
612 if (status_code) { 615 if (status_code) {
613 lbs_mac_event_disconnected(priv); 616 lbs_mac_event_disconnected(priv);
614 ret = -1; 617 ret = status_code;
615 goto done; 618 goto done;
616 } 619 }
617 620
@@ -814,7 +817,24 @@ static int lbs_try_associate(struct lbs_private *priv,
814 goto out; 817 goto out;
815 818
816 ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE); 819 ret = lbs_associate(priv, assoc_req, CMD_802_11_ASSOCIATE);
 820 /* If the association fails with the current auth mode, try
 821 * again with the other auth mode.
 822 */
823 if ((priv->authtype_auto) &&
824 (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) &&
825 (assoc_req->secinfo.wep_enabled) &&
826 (priv->connect_status != LBS_CONNECTED)) {
827 if (priv->secinfo.auth_mode == IW_AUTH_ALG_OPEN_SYSTEM)
828 priv->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
829 else
830 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
831 if (!assoc_helper_wep_keys(priv, assoc_req))
832 ret = lbs_associate(priv, assoc_req,
833 CMD_802_11_ASSOCIATE);
834 }
817 835
836 if (ret)
837 ret = -1;
818out: 838out:
819 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 839 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
820 return ret; 840 return ret;
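lbs_assoc_post() now propagates the IEEE status code instead of flattening it to -1, which is what lets lbs_try_associate() key the retry on WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG and flip between open-system and shared-key WEP auth (the authtype_auto flag, cleared in wext.c below when the user pins a mode, gates the whole fallback). A hedged sketch with simplified types:

    #include <linux/ieee80211.h>
    #include <linux/types.h>

    enum auth_alg { AUTH_OPEN, AUTH_SHARED };

    /* do_associate() stands in for the firmware command; illustrative only. */
    static int associate_with_fallback(enum auth_alg *alg, bool wep_enabled,
    				   int (*do_associate)(enum auth_alg))
    {
    	int ret = do_associate(*alg);

    	/* On an auth-algorithm rejection, retry once with the other mode. */
    	if (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && wep_enabled) {
    		*alg = (*alg == AUTH_OPEN) ? AUTH_SHARED : AUTH_OPEN;
    		ret = do_associate(*alg);
    	}
    	return ret;
    }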
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index a48ccaffb288..6f5b843c1f44 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -75,7 +75,7 @@ static ssize_t lbs_getscantable(struct file *file, char __user *userbuf,
75 return -ENOMEM; 75 return -ENOMEM;
76 76
77 pos += snprintf(buf+pos, len-pos, 77 pos += snprintf(buf+pos, len-pos,
78 "# | ch | rssi | bssid | cap | Qual | SSID \n"); 78 "# | ch | rssi | bssid | cap | Qual | SSID\n");
79 79
80 mutex_lock(&priv->lock); 80 mutex_lock(&priv->lock);
81 list_for_each_entry (iter_bss, &priv->network_list, list) { 81 list_for_each_entry (iter_bss, &priv->network_list, list) {
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 6875e1498bd5..a54880e4ad2b 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -134,6 +134,7 @@ struct lbs_private {
134 u8 wpa_ie_len; 134 u8 wpa_ie_len;
135 u16 wep_tx_keyidx; 135 u16 wep_tx_keyidx;
136 struct enc_key wep_keys[4]; 136 struct enc_key wep_keys[4];
137 u8 authtype_auto;
137 138
138 /* Wake On LAN */ 139 /* Wake On LAN */
139 uint32_t wol_criteria; 140 uint32_t wol_criteria;
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 7d1a3c6b6ce0..cd464a2589b9 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -35,6 +35,8 @@
35#include <linux/mmc/card.h> 35#include <linux/mmc/card.h>
36#include <linux/mmc/sdio_func.h> 36#include <linux/mmc/sdio_func.h>
37#include <linux/mmc/sdio_ids.h> 37#include <linux/mmc/sdio_ids.h>
38#include <linux/mmc/sdio.h>
39#include <linux/mmc/host.h>
38 40
39#include "host.h" 41#include "host.h"
40#include "decl.h" 42#include "decl.h"
@@ -943,6 +945,7 @@ static int if_sdio_probe(struct sdio_func *func,
943 int ret, i; 945 int ret, i;
944 unsigned int model; 946 unsigned int model;
945 struct if_sdio_packet *packet; 947 struct if_sdio_packet *packet;
948 struct mmc_host *host = func->card->host;
946 949
947 lbs_deb_enter(LBS_DEB_SDIO); 950 lbs_deb_enter(LBS_DEB_SDIO);
948 951
@@ -1023,6 +1026,25 @@ static int if_sdio_probe(struct sdio_func *func,
1023 if (ret) 1026 if (ret)
1024 goto disable; 1027 goto disable;
1025 1028
1029 /* For 1-bit transfers to the 8686 model, we need to enable the
1030 * interrupt flag in the CCCR register. Set the MMC_QUIRK_LENIENT_FN0
1031 * bit to allow access to non-vendor registers. */
1032 if ((card->model == IF_SDIO_MODEL_8686) &&
1033 (host->caps & MMC_CAP_SDIO_IRQ) &&
1034 (host->ios.bus_width == MMC_BUS_WIDTH_1)) {
1035 u8 reg;
1036
1037 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
1038 reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
1039 if (ret)
1040 goto release_int;
1041
1042 reg |= SDIO_BUS_ECSI;
1043 sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
1044 if (ret)
1045 goto release_int;
1046 }
1047
1026 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret); 1048 card->ioport = sdio_readb(func, IF_SDIO_IOPORT, &ret);
1027 if (ret) 1049 if (ret)
1028 goto release_int; 1050 goto release_int;
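The if_sdio hunk pokes the card's CCCR bus-interface register through function 0, which is only permitted for non-vendor offsets once MMC_QUIRK_LENIENT_FN0 is set; ECSI then keeps interrupts usable in 1-bit bus mode. The read-modify-write in isolation, assuming the same mmc/sdio helpers:

    #include <linux/mmc/card.h>
    #include <linux/mmc/sdio.h>
    #include <linux/mmc/sdio_func.h>

    /* Enable ECSI (interrupts during 1-bit transfers) via CCCR. */
    static int enable_ecsi(struct sdio_func *func)
    {
    	int ret;
    	u8 reg;

    	func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
    	reg = sdio_f0_readb(func, SDIO_CCCR_IF, &ret);
    	if (ret)
    		return ret;

    	reg |= SDIO_BUS_ECSI;
    	sdio_f0_writeb(func, reg, SDIO_CCCR_IF, &ret);
    	return ret;
    }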
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 598080414b17..38edad6f24b8 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -319,7 +319,7 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
319 struct net_device *dev, int nr_addrs) 319 struct net_device *dev, int nr_addrs)
320{ 320{
321 int i = nr_addrs; 321 int i = nr_addrs;
322 struct dev_mc_list *mc_list; 322 struct netdev_hw_addr *ha;
323 int cnt; 323 int cnt;
324 324
325 if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST)) 325 if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
@@ -327,19 +327,19 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
327 327
328 netif_addr_lock_bh(dev); 328 netif_addr_lock_bh(dev);
329 cnt = netdev_mc_count(dev); 329 cnt = netdev_mc_count(dev);
330 netdev_for_each_mc_addr(mc_list, dev) { 330 netdev_for_each_mc_addr(ha, dev) {
331 if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) { 331 if (mac_in_list(cmd->maclist, nr_addrs, ha->addr)) {
332 lbs_deb_net("mcast address %s:%pM skipped\n", dev->name, 332 lbs_deb_net("mcast address %s:%pM skipped\n", dev->name,
333 mc_list->dmi_addr); 333 ha->addr);
334 cnt--; 334 cnt--;
335 continue; 335 continue;
336 } 336 }
337 337
338 if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE) 338 if (i == MRVDRV_MAX_MULTICAST_LIST_SIZE)
339 break; 339 break;
340 memcpy(&cmd->maclist[6*i], mc_list->dmi_addr, ETH_ALEN); 340 memcpy(&cmd->maclist[6*i], ha->addr, ETH_ALEN);
341 lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name, 341 lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name,
342 mc_list->dmi_addr); 342 ha->addr);
343 i++; 343 i++;
344 cnt--; 344 cnt--;
345 } 345 }
@@ -836,6 +836,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
836 priv->is_auto_deep_sleep_enabled = 0; 836 priv->is_auto_deep_sleep_enabled = 0;
837 priv->wakeup_dev_required = 0; 837 priv->wakeup_dev_required = 0;
838 init_waitqueue_head(&priv->ds_awake_q); 838 init_waitqueue_head(&priv->ds_awake_q);
839 priv->authtype_auto = 1;
839 840
840 mutex_init(&priv->lock); 841 mutex_init(&priv->lock);
841 842
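These libertas hunks are part of the tree-wide move from struct dev_mc_list (dmi_addr) to struct netdev_hw_addr: netdev_for_each_mc_addr() keeps its name but now yields the new type, whose MAC lives in ha->addr. A sketch of the new-style walk under the address lock:

    #include <linux/etherdevice.h>
    #include <linux/netdevice.h>
    #include <linux/string.h>

    /* Copy up to max multicast addresses out of dev; returns how many. */
    static int copy_mc_addrs(struct net_device *dev, u8 (*dst)[ETH_ALEN], int max)
    {
    	struct netdev_hw_addr *ha;
    	int i = 0;

    	netif_addr_lock_bh(dev);
    	netdev_for_each_mc_addr(ha, dev) {
    		if (i == max)
    			break;
    		memcpy(dst[i++], ha->addr, ETH_ALEN);
    	}
    	netif_addr_unlock_bh(dev);
    	return i;
    }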
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 784dae714705..e2b8d886b091 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -39,10 +39,10 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
39 struct sk_buff *skb); 39 struct sk_buff *skb);
40 40
41/** 41/**
42 * @brief This function computes the avgSNR . 42 * @brief This function computes the avgSNR .
43 * 43 *
44 * @param priv A pointer to struct lbs_private structure 44 * @param priv A pointer to struct lbs_private structure
45 * @return avgSNR 45 * @return avgSNR
46 */ 46 */
47static u8 lbs_getavgsnr(struct lbs_private *priv) 47static u8 lbs_getavgsnr(struct lbs_private *priv)
48{ 48{
@@ -57,10 +57,10 @@ static u8 lbs_getavgsnr(struct lbs_private *priv)
57} 57}
58 58
59/** 59/**
60 * @brief This function computes the AvgNF 60 * @brief This function computes the AvgNF
61 * 61 *
62 * @param priv A pointer to struct lbs_private structure 62 * @param priv A pointer to struct lbs_private structure
63 * @return AvgNF 63 * @return AvgNF
64 */ 64 */
65static u8 lbs_getavgnf(struct lbs_private *priv) 65static u8 lbs_getavgnf(struct lbs_private *priv)
66{ 66{
@@ -75,11 +75,11 @@ static u8 lbs_getavgnf(struct lbs_private *priv)
75} 75}
76 76
77/** 77/**
 78 * @brief This function saves the raw SNR/NF to our internal buffer 78 * @brief This function saves the raw SNR/NF to our internal buffer
79 * 79 *
80 * @param priv A pointer to struct lbs_private structure 80 * @param priv A pointer to struct lbs_private structure
81 * @param prxpd A pointer to rxpd structure of received packet 81 * @param prxpd A pointer to rxpd structure of received packet
82 * @return n/a 82 * @return n/a
83 */ 83 */
84static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd) 84static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
85{ 85{
@@ -94,11 +94,11 @@ static void lbs_save_rawSNRNF(struct lbs_private *priv, struct rxpd *p_rx_pd)
94} 94}
95 95
96/** 96/**
97 * @brief This function computes the RSSI in received packet. 97 * @brief This function computes the RSSI in received packet.
98 * 98 *
99 * @param priv A pointer to struct lbs_private structure 99 * @param priv A pointer to struct lbs_private structure
100 * @param prxpd A pointer to rxpd structure of received packet 100 * @param prxpd A pointer to rxpd structure of received packet
101 * @return n/a 101 * @return n/a
102 */ 102 */
103static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd) 103static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
104{ 104{
@@ -135,9 +135,9 @@ static void lbs_compute_rssi(struct lbs_private *priv, struct rxpd *p_rx_pd)
135 * @brief This function processes received packet and forwards it 135 * @brief This function processes received packet and forwards it
136 * to kernel/upper layer 136 * to kernel/upper layer
137 * 137 *
138 * @param priv A pointer to struct lbs_private 138 * @param priv A pointer to struct lbs_private
139 * @param skb A pointer to skb which includes the received packet 139 * @param skb A pointer to skb which includes the received packet
140 * @return 0 or -1 140 * @return 0 or -1
141 */ 141 */
142int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb) 142int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
143{ 143{
@@ -197,7 +197,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
197 * before the snap_type. 197 * before the snap_type.
198 */ 198 */
199 p_ethhdr = (struct ethhdr *) 199 p_ethhdr = (struct ethhdr *)
200 ((u8 *) & p_rx_pkt->eth803_hdr 200 ((u8 *) &p_rx_pkt->eth803_hdr
201 + sizeof(p_rx_pkt->eth803_hdr) + sizeof(p_rx_pkt->rfc1042_hdr) 201 + sizeof(p_rx_pkt->eth803_hdr) + sizeof(p_rx_pkt->rfc1042_hdr)
202 - sizeof(p_rx_pkt->eth803_hdr.dest_addr) 202 - sizeof(p_rx_pkt->eth803_hdr.dest_addr)
203 - sizeof(p_rx_pkt->eth803_hdr.src_addr) 203 - sizeof(p_rx_pkt->eth803_hdr.src_addr)
@@ -214,7 +214,7 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
214 hdrchop = (u8 *)p_ethhdr - (u8 *)p_rx_pd; 214 hdrchop = (u8 *)p_ethhdr - (u8 *)p_rx_pd;
215 } else { 215 } else {
216 lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP", 216 lbs_deb_hex(LBS_DEB_RX, "RX Data: LLC/SNAP",
217 (u8 *) & p_rx_pkt->rfc1042_hdr, 217 (u8 *) &p_rx_pkt->rfc1042_hdr,
218 sizeof(p_rx_pkt->rfc1042_hdr)); 218 sizeof(p_rx_pkt->rfc1042_hdr));
219 219
220 /* Chop off the rxpd */ 220 /* Chop off the rxpd */
@@ -255,8 +255,8 @@ EXPORT_SYMBOL_GPL(lbs_process_rxed_packet);
255 * @brief This function converts Tx/Rx rates from the Marvell WLAN format 255 * @brief This function converts Tx/Rx rates from the Marvell WLAN format
256 * (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s) 256 * (see Table 2 in Section 3.1) to IEEE80211_RADIOTAP_RATE units (500 Kb/s)
257 * 257 *
258 * @param rate Input rate 258 * @param rate Input rate
259 * @return Output Rate (0 if invalid) 259 * @return Output Rate (0 if invalid)
260 */ 260 */
261static u8 convert_mv_rate_to_radiotap(u8 rate) 261static u8 convert_mv_rate_to_radiotap(u8 rate)
262{ 262{
@@ -295,9 +295,9 @@ static u8 convert_mv_rate_to_radiotap(u8 rate)
295 * @brief This function processes a received 802.11 packet and forwards it 295 * @brief This function processes a received 802.11 packet and forwards it
296 * to kernel/upper layer 296 * to kernel/upper layer
297 * 297 *
298 * @param priv A pointer to struct lbs_private 298 * @param priv A pointer to struct lbs_private
299 * @param skb A pointer to skb which includes the received packet 299 * @param skb A pointer to skb which includes the received packet
300 * @return 0 or -1 300 * @return 0 or -1
301 */ 301 */
302static int process_rxed_802_11_packet(struct lbs_private *priv, 302static int process_rxed_802_11_packet(struct lbs_private *priv,
303 struct sk_buff *skb) 303 struct sk_buff *skb)
@@ -314,7 +314,7 @@ static int process_rxed_802_11_packet(struct lbs_private *priv,
314 p_rx_pkt = (struct rx80211packethdr *) skb->data; 314 p_rx_pkt = (struct rx80211packethdr *) skb->data;
315 prxpd = &p_rx_pkt->rx_pd; 315 prxpd = &p_rx_pkt->rx_pd;
316 316
317 // lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); 317 /* lbs_deb_hex(LBS_DEB_RX, "RX Data: Before chop rxpd", skb->data, min(skb->len, 100)); */
318 318
319 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) { 319 if (skb->len < (ETH_HLEN + 8 + sizeof(struct rxpd))) {
320 lbs_deb_rx("rx err: frame received with bad length\n"); 320 lbs_deb_rx("rx err: frame received with bad length\n");
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 9b555884b08a..f96a96031a50 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -1441,8 +1441,10 @@ static int lbs_set_encode(struct net_device *dev,
1441 set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags); 1441 set_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags);
1442 1442
1443 if (dwrq->flags & IW_ENCODE_RESTRICTED) { 1443 if (dwrq->flags & IW_ENCODE_RESTRICTED) {
1444 priv->authtype_auto = 0;
1444 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; 1445 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
1445 } else if (dwrq->flags & IW_ENCODE_OPEN) { 1446 } else if (dwrq->flags & IW_ENCODE_OPEN) {
1447 priv->authtype_auto = 0;
1446 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; 1448 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
1447 } 1449 }
1448 1450
@@ -1621,8 +1623,10 @@ static int lbs_set_encodeext(struct net_device *dev,
1621 goto out; 1623 goto out;
1622 1624
1623 if (dwrq->flags & IW_ENCODE_RESTRICTED) { 1625 if (dwrq->flags & IW_ENCODE_RESTRICTED) {
1626 priv->authtype_auto = 0;
1624 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY; 1627 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_SHARED_KEY;
1625 } else if (dwrq->flags & IW_ENCODE_OPEN) { 1628 } else if (dwrq->flags & IW_ENCODE_OPEN) {
1629 priv->authtype_auto = 0;
1626 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; 1630 assoc_req->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
1627 } 1631 }
1628 1632
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 7945ff5aa334..7533a23e0500 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -369,22 +369,20 @@ static int lbtf_op_config(struct ieee80211_hw *hw, u32 changed)
369} 369}
370 370
371static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw, 371static u64 lbtf_op_prepare_multicast(struct ieee80211_hw *hw,
372 int mc_count, struct dev_addr_list *mclist) 372 struct netdev_hw_addr_list *mc_list)
373{ 373{
374 struct lbtf_private *priv = hw->priv; 374 struct lbtf_private *priv = hw->priv;
375 int i; 375 int i;
376 struct netdev_hw_addr *ha;
377 int mc_count = netdev_hw_addr_list_count(mc_list);
376 378
377 if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE) 379 if (!mc_count || mc_count > MRVDRV_MAX_MULTICAST_LIST_SIZE)
378 return mc_count; 380 return mc_count;
379 381
380 priv->nr_of_multicastmacaddr = mc_count; 382 priv->nr_of_multicastmacaddr = mc_count;
381 for (i = 0; i < mc_count; i++) { 383 i = 0;
382 if (!mclist) 384 netdev_hw_addr_list_for_each(ha, mc_list)
383 break; 385 memcpy(&priv->multicastlist[i++], ha->addr, ETH_ALEN);
384 memcpy(&priv->multicastlist[i], mclist->da_addr,
385 ETH_ALEN);
386 mclist = mclist->next;
387 }
388 386
389 return mc_count; 387 return mc_count;
390} 388}
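mac80211's prepare_multicast callback went through the same conversion: the (mc_count, dev_addr_list) pair becomes a single netdev_hw_addr_list, with the count recovered via netdev_hw_addr_list_count() and the walk done with netdev_hw_addr_list_for_each(). The new callback shape, with a hypothetical driver struct:

    #include <linux/etherdevice.h>
    #include <linux/string.h>
    #include <net/mac80211.h>

    #define FOO_MAX_MC 32

    struct foo_priv {				/* hypothetical driver state */
    	u8 mc_addrs[FOO_MAX_MC][ETH_ALEN];
    	int nr_mc;
    };

    static u64 foo_prepare_multicast(struct ieee80211_hw *hw,
    				 struct netdev_hw_addr_list *mc_list)
    {
    	struct foo_priv *priv = hw->priv;
    	struct netdev_hw_addr *ha;
    	int mc_count = netdev_hw_addr_list_count(mc_list);
    	int i = 0;

    	if (!mc_count || mc_count > FOO_MAX_MC)
    		return mc_count;

    	priv->nr_mc = mc_count;
    	netdev_hw_addr_list_for_each(ha, mc_list)
    		memcpy(priv->mc_addrs[i++], ha->addr, ETH_ALEN);

    	return mc_count;
    }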
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7cd5f56662fc..dfff02f5c86d 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -291,7 +291,8 @@ struct mac80211_hwsim_data {
291 struct ieee80211_channel *channel; 291 struct ieee80211_channel *channel;
292 unsigned long beacon_int; /* in jiffies unit */ 292 unsigned long beacon_int; /* in jiffies unit */
293 unsigned int rx_filter; 293 unsigned int rx_filter;
294 bool started, idle; 294 bool started, idle, scanning;
295 struct mutex mutex;
295 struct timer_list beacon_timer; 296 struct timer_list beacon_timer;
296 enum ps_mode { 297 enum ps_mode {
297 PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL 298 PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
@@ -957,9 +958,9 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
957 hsd->hw = hw; 958 hsd->hw = hw;
958 INIT_DELAYED_WORK(&hsd->w, hw_scan_done); 959 INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
959 960
960 printk(KERN_DEBUG "hwsim scan request\n"); 961 printk(KERN_DEBUG "hwsim hw_scan request\n");
961 for (i = 0; i < req->n_channels; i++) 962 for (i = 0; i < req->n_channels; i++)
962 printk(KERN_DEBUG "hwsim scan freq %d\n", 963 printk(KERN_DEBUG "hwsim hw_scan freq %d\n",
963 req->channels[i]->center_freq); 964 req->channels[i]->center_freq);
964 965
965 ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ); 966 ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
@@ -967,6 +968,36 @@ static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
967 return 0; 968 return 0;
968} 969}
969 970
971static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw)
972{
973 struct mac80211_hwsim_data *hwsim = hw->priv;
974
975 mutex_lock(&hwsim->mutex);
976
977 if (hwsim->scanning) {
978 printk(KERN_DEBUG "two hwsim sw_scans detected!\n");
979 goto out;
980 }
981
982 printk(KERN_DEBUG "hwsim sw_scan request, prepping stuff\n");
983 hwsim->scanning = true;
984
985out:
986 mutex_unlock(&hwsim->mutex);
987}
988
989static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw)
990{
991 struct mac80211_hwsim_data *hwsim = hw->priv;
992
993 mutex_lock(&hwsim->mutex);
994
995 printk(KERN_DEBUG "hwsim sw_scan_complete\n");
 996 hwsim->scanning = false;
997
998 mutex_unlock(&hwsim->mutex);
999}
1000
970static struct ieee80211_ops mac80211_hwsim_ops = 1001static struct ieee80211_ops mac80211_hwsim_ops =
971{ 1002{
972 .tx = mac80211_hwsim_tx, 1003 .tx = mac80211_hwsim_tx,
@@ -984,6 +1015,8 @@ static struct ieee80211_ops mac80211_hwsim_ops =
984 .conf_tx = mac80211_hwsim_conf_tx, 1015 .conf_tx = mac80211_hwsim_conf_tx,
985 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) 1016 CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
986 .ampdu_action = mac80211_hwsim_ampdu_action, 1017 .ampdu_action = mac80211_hwsim_ampdu_action,
1018 .sw_scan_start = mac80211_hwsim_sw_scan,
1019 .sw_scan_complete = mac80211_hwsim_sw_scan_complete,
987 .flush = mac80211_hwsim_flush, 1020 .flush = mac80211_hwsim_flush,
988}; 1021};
989 1022
@@ -1179,8 +1212,11 @@ static int __init init_mac80211_hwsim(void)
1179 if (radios < 1 || radios > 100) 1212 if (radios < 1 || radios > 100)
1180 return -EINVAL; 1213 return -EINVAL;
1181 1214
1182 if (fake_hw_scan) 1215 if (fake_hw_scan) {
1183 mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan; 1216 mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
1217 mac80211_hwsim_ops.sw_scan_start = NULL;
1218 mac80211_hwsim_ops.sw_scan_complete = NULL;
1219 }
1184 1220
1185 spin_lock_init(&hwsim_radio_lock); 1221 spin_lock_init(&hwsim_radio_lock);
1186 INIT_LIST_HEAD(&hwsim_radios); 1222 INIT_LIST_HEAD(&hwsim_radios);
@@ -1285,6 +1321,7 @@ static int __init init_mac80211_hwsim(void)
1285 } 1321 }
1286 /* By default all radios are belonging to the first group */ 1322 /* By default all radios are belonging to the first group */
1287 data->group = 1; 1323 data->group = 1;
1324 mutex_init(&data->mutex);
1288 1325
1289 /* Work to be done prior to ieee80211_register_hw() */ 1326 /* Work to be done prior to ieee80211_register_hw() */
1290 switch (regtest) { 1327 switch (regtest) {
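The hwsim additions guard a per-radio "scanning" flag with a mutex so that overlapping software scans are caught: sw_scan_start sets the flag (or complains), and sw_scan_complete clears it so the next scan can run; when fake_hw_scan is set, the sw_scan hooks are NULLed out since hw_scan takes over. The flag handling in isolation:

    #include <linux/kernel.h>
    #include <linux/mutex.h>
    #include <linux/types.h>

    struct scan_state {
    	struct mutex mutex;
    	bool scanning;
    };

    static void foo_sw_scan_start(struct scan_state *s)
    {
    	mutex_lock(&s->mutex);
    	if (s->scanning)
    		pr_debug("two sw_scans detected!\n");
    	else
    		s->scanning = true;
    	mutex_unlock(&s->mutex);
    }

    static void foo_sw_scan_complete(struct scan_state *s)
    {
    	mutex_lock(&s->mutex);
    	s->scanning = false;	/* scan over; allow the next one */
    	mutex_unlock(&s->mutex);
    }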
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 12fdcb25fd38..73bbd080c6e7 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -1939,11 +1939,15 @@ struct mwl8k_cmd_mac_multicast_adr {
1939 1939
1940static struct mwl8k_cmd_pkt * 1940static struct mwl8k_cmd_pkt *
1941__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti, 1941__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
1942 int mc_count, struct dev_addr_list *mclist) 1942 struct netdev_hw_addr_list *mc_list)
1943{ 1943{
1944 struct mwl8k_priv *priv = hw->priv; 1944 struct mwl8k_priv *priv = hw->priv;
1945 struct mwl8k_cmd_mac_multicast_adr *cmd; 1945 struct mwl8k_cmd_mac_multicast_adr *cmd;
1946 int size; 1946 int size;
1947 int mc_count = 0;
1948
1949 if (mc_list)
1950 mc_count = netdev_hw_addr_list_count(mc_list);
1947 1951
1948 if (allmulti || mc_count > priv->num_mcaddrs) { 1952 if (allmulti || mc_count > priv->num_mcaddrs) {
1949 allmulti = 1; 1953 allmulti = 1;
@@ -1964,17 +1968,13 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
1964 if (allmulti) { 1968 if (allmulti) {
1965 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST); 1969 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST);
1966 } else if (mc_count) { 1970 } else if (mc_count) {
1967 int i; 1971 struct netdev_hw_addr *ha;
1972 int i = 0;
1968 1973
1969 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST); 1974 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
1970 cmd->numaddr = cpu_to_le16(mc_count); 1975 cmd->numaddr = cpu_to_le16(mc_count);
1971 for (i = 0; i < mc_count && mclist; i++) { 1976 netdev_hw_addr_list_for_each(ha, mc_list) {
1972 if (mclist->da_addrlen != ETH_ALEN) { 1977 memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
1973 kfree(cmd);
1974 return NULL;
1975 }
1976 memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
1977 mclist = mclist->next;
1978 } 1978 }
1979 } 1979 }
1980 1980
@@ -3553,7 +3553,7 @@ mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3553} 3553}
3554 3554
3555static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw, 3555static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
3556 int mc_count, struct dev_addr_list *mclist) 3556 struct netdev_hw_addr_list *mc_list)
3557{ 3557{
3558 struct mwl8k_cmd_pkt *cmd; 3558 struct mwl8k_cmd_pkt *cmd;
3559 3559
@@ -3564,7 +3564,7 @@ static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
3564 * we'll end up throwing this packet away and creating a new 3564 * we'll end up throwing this packet away and creating a new
3565 * one in mwl8k_configure_filter(). 3565 * one in mwl8k_configure_filter().
3566 */ 3566 */
3567 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist); 3567 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_list);
3568 3568
3569 return (unsigned long)cmd; 3569 return (unsigned long)cmd;
3570} 3570}
@@ -3687,7 +3687,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
3687 */ 3687 */
3688 if (*total_flags & FIF_ALLMULTI) { 3688 if (*total_flags & FIF_ALLMULTI) {
3689 kfree(cmd); 3689 kfree(cmd);
3690 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL); 3690 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, NULL);
3691 } 3691 }
3692 3692
3693 if (cmd != NULL) { 3693 if (cmd != NULL) {
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index e2a2c18920aa..6116b546861d 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -27,6 +27,17 @@ config HERMES
27 configure your card and that /etc/pcmcia/wireless.opts works : 27 configure your card and that /etc/pcmcia/wireless.opts works :
28 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html> 28 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>
29 29
30config HERMES_PRISM
31 bool "Support Prism 2/2.5 chipset"
32 depends on HERMES
33 ---help---
34
35 Say Y to enable support for Prism 2 and 2.5 chipsets. These
 36 chipsets are better handled by the hostap driver. This driver
 37 does not support WPA or firmware download for Prism chipsets.
38
39 If you are not sure, say N.
40
30config HERMES_CACHE_FW_ON_INIT 41config HERMES_CACHE_FW_ON_INIT
31 bool "Cache Hermes firmware on driver initialisation" 42 bool "Cache Hermes firmware on driver initialisation"
32 depends on HERMES 43 depends on HERMES
@@ -86,7 +97,7 @@ config NORTEL_HERMES
86 97
87config PCI_HERMES 98config PCI_HERMES
88 tristate "Prism 2.5 PCI 802.11b adaptor support" 99 tristate "Prism 2.5 PCI 802.11b adaptor support"
89 depends on PCI && HERMES 100 depends on PCI && HERMES && HERMES_PRISM
90 help 101 help
91 Enable support for PCI and mini-PCI 802.11b wireless NICs based on 102 Enable support for PCI and mini-PCI 802.11b wireless NICs based on
92 the Prism 2.5 chipset. These are true PCI cards, not the 802.11b 103 the Prism 2.5 chipset. These are true PCI cards, not the 802.11b
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index e6369242e49c..9f657afaa3e5 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -262,6 +262,13 @@ int determine_fw_capabilities(struct orinoco_private *priv,
262 if (fw_name) 262 if (fw_name)
263 dev_info(dev, "Firmware determined as %s\n", fw_name); 263 dev_info(dev, "Firmware determined as %s\n", fw_name);
264 264
265#ifndef CONFIG_HERMES_PRISM
266 if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL) {
267 dev_err(dev, "Support for Prism chipset is not enabled\n");
268 return -ENODEV;
269 }
270#endif
271
265 return 0; 272 return 0;
266} 273}
267 274
@@ -1049,14 +1056,14 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
1049 * group address if either we want to multicast, or if we were 1056 * group address if either we want to multicast, or if we were
1050 * multicasting and want to stop */ 1057 * multicasting and want to stop */
1051 if (!promisc && (mc_count || priv->mc_count)) { 1058 if (!promisc && (mc_count || priv->mc_count)) {
1052 struct dev_mc_list *p; 1059 struct netdev_hw_addr *ha;
1053 struct hermes_multicast mclist; 1060 struct hermes_multicast mclist;
1054 int i = 0; 1061 int i = 0;
1055 1062
1056 netdev_for_each_mc_addr(p, dev) { 1063 netdev_for_each_mc_addr(ha, dev) {
1057 if (i == mc_count) 1064 if (i == mc_count)
1058 break; 1065 break;
1059 memcpy(mclist.addr[i++], p->dmi_addr, ETH_ALEN); 1066 memcpy(mclist.addr[i++], ha->addr, ETH_ALEN);
1060 } 1067 }
1061 1068
1062 err = hermes_write_ltv(hw, USER_BAP, 1069 err = hermes_write_ltv(hw, USER_BAP,
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 9799a1d14a63..97af71e79950 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -22,7 +22,6 @@
22 22
23/* Forward declarations */ 23/* Forward declarations */
24struct orinoco_private; 24struct orinoco_private;
25struct dev_addr_list;
26 25
27int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name, 26int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
28 size_t fw_name_len, u32 *hw_ver); 27 size_t fw_name_len, u32 *hw_ver);
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index 1d4ada188eda..fdc961379170 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -374,87 +374,90 @@ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION
374 "Pavel Roskin <proski@gnu.org>, et al)"; 374 "Pavel Roskin <proski@gnu.org>, et al)";
375 375
376static struct pcmcia_device_id orinoco_cs_ids[] = { 376static struct pcmcia_device_id orinoco_cs_ids[] = {
377 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
378 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
379 PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
380 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */ 377 PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */
381 PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
382 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
383 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */ 378 PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), /* Lucent Orinoco and old Intersil */
384 PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */ 379 PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */
385 PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */ 380 PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */
386 PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
387 PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
388 PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */ 381 PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */
389 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */ 382 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */
390 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */ 383 PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */
391 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */ 384 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */
392 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
393 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
394 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
395 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
396 PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */ 385 PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */
397 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */ 386 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter */
398 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */ 387 PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */
399 PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */ 388 PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */
389 PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
390 PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
391 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
392 PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
393 PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
394 PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
395 PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
396 PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
397 PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
398 PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
399 PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
400 PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
401 PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
402 PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
403 PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
404 PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
405 PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
406 PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
407 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
408 PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
409 PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
410 PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
411#ifdef CONFIG_HERMES_PRISM
412 /* Only entries that certainly identify Prism chipset */
413 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */
414 PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */
415 PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */
416 PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */
417 PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */
418 PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */
419 PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */
420 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */
421 PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */
422 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */
423 PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */
 	PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */
 	PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */
 	PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */
 	PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */
 	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */
 	PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */
-	PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
-	PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
+	PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
 	PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
 	PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
-	PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
-	PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
-	PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
-	PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169),
-	PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb),
-	PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
 	PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
-	PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
+	PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3),
 	PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
 	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
 	PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
 	PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
+	PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
 	PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac),
 	PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab),
-	PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916),
-	PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146),
-	PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3),
-	PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c),
-	PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0),
-	PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077),
+	PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9),
 	PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
 	PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
 	PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
-	PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
 	PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
-	PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
-	PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410),
-	PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3),
 	PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01),
-	PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a),
-	PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
 	PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1),
-	PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
+	PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1),
 	PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
-	PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
 	PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
 	PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
-	PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
-	PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26),
-	PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b),
 	PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757),
 	PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
-	PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
 	PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
 	PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
 	PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
 	PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
 	PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
+#endif
 	PCMCIA_DEVICE_NULL,
 };
 MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
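(Aside: each PCMCIA_DEVICE_MANF_CARD() entry above asks the PCMCIA core to match a card by the manufacturer/card IDs read from its CIS. A minimal sketch of the idiom, with hypothetical struct and macro names that are simplified stand-ins for the real pcmcia device-ID definitions, not this patch's code:)

/* Illustrative sketch only; field and macro names are assumptions. */
struct example_pcmcia_id {
	unsigned short match_flags;	/* which fields below must match */
	unsigned short manf_id;		/* manufacturer id from the CIS */
	unsigned short card_id;		/* card id from the CIS */
};

#define EXAMPLE_DEVICE_MANF_CARD(manf, card) \
	{ .match_flags = 0x3, .manf_id = (manf), .card_id = (card) }

static const struct example_pcmcia_id example_ids[] = {
	EXAMPLE_DEVICE_MANF_CARD(0x50c2, 0x7300),	/* matched like the table above */
	{ 0, 0, 0 },	/* terminator, like PCMCIA_DEVICE_NULL */
};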
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index fbcc6e1a2e1d..57b850ebfeb2 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -1506,46 +1506,44 @@ static const struct iw_priv_args orinoco_privtab[] = {
  * Structures to export the Wireless Handlers
  */
 
-#define STD_IW_HANDLER(id, func) \
-	[IW_IOCTL_IDX(id)] = (iw_handler) func
 static const iw_handler orinoco_handler[] = {
-	STD_IW_HANDLER(SIOCSIWCOMMIT,	orinoco_ioctl_commit),
-	STD_IW_HANDLER(SIOCGIWNAME,	cfg80211_wext_giwname),
-	STD_IW_HANDLER(SIOCSIWFREQ,	orinoco_ioctl_setfreq),
-	STD_IW_HANDLER(SIOCGIWFREQ,	orinoco_ioctl_getfreq),
-	STD_IW_HANDLER(SIOCSIWMODE,	cfg80211_wext_siwmode),
-	STD_IW_HANDLER(SIOCGIWMODE,	cfg80211_wext_giwmode),
-	STD_IW_HANDLER(SIOCSIWSENS,	orinoco_ioctl_setsens),
-	STD_IW_HANDLER(SIOCGIWSENS,	orinoco_ioctl_getsens),
-	STD_IW_HANDLER(SIOCGIWRANGE,	cfg80211_wext_giwrange),
-	STD_IW_HANDLER(SIOCSIWSPY,	iw_handler_set_spy),
-	STD_IW_HANDLER(SIOCGIWSPY,	iw_handler_get_spy),
-	STD_IW_HANDLER(SIOCSIWTHRSPY,	iw_handler_set_thrspy),
-	STD_IW_HANDLER(SIOCGIWTHRSPY,	iw_handler_get_thrspy),
-	STD_IW_HANDLER(SIOCSIWAP,	orinoco_ioctl_setwap),
-	STD_IW_HANDLER(SIOCGIWAP,	orinoco_ioctl_getwap),
-	STD_IW_HANDLER(SIOCSIWSCAN,	cfg80211_wext_siwscan),
-	STD_IW_HANDLER(SIOCGIWSCAN,	cfg80211_wext_giwscan),
-	STD_IW_HANDLER(SIOCSIWESSID,	orinoco_ioctl_setessid),
-	STD_IW_HANDLER(SIOCGIWESSID,	orinoco_ioctl_getessid),
-	STD_IW_HANDLER(SIOCSIWRATE,	orinoco_ioctl_setrate),
-	STD_IW_HANDLER(SIOCGIWRATE,	orinoco_ioctl_getrate),
-	STD_IW_HANDLER(SIOCSIWRTS,	orinoco_ioctl_setrts),
-	STD_IW_HANDLER(SIOCGIWRTS,	orinoco_ioctl_getrts),
-	STD_IW_HANDLER(SIOCSIWFRAG,	orinoco_ioctl_setfrag),
-	STD_IW_HANDLER(SIOCGIWFRAG,	orinoco_ioctl_getfrag),
-	STD_IW_HANDLER(SIOCGIWRETRY,	orinoco_ioctl_getretry),
-	STD_IW_HANDLER(SIOCSIWENCODE,	orinoco_ioctl_setiwencode),
-	STD_IW_HANDLER(SIOCGIWENCODE,	orinoco_ioctl_getiwencode),
-	STD_IW_HANDLER(SIOCSIWPOWER,	orinoco_ioctl_setpower),
-	STD_IW_HANDLER(SIOCGIWPOWER,	orinoco_ioctl_getpower),
-	STD_IW_HANDLER(SIOCSIWGENIE,	orinoco_ioctl_set_genie),
-	STD_IW_HANDLER(SIOCGIWGENIE,	orinoco_ioctl_get_genie),
-	STD_IW_HANDLER(SIOCSIWMLME,	orinoco_ioctl_set_mlme),
-	STD_IW_HANDLER(SIOCSIWAUTH,	orinoco_ioctl_set_auth),
-	STD_IW_HANDLER(SIOCGIWAUTH,	orinoco_ioctl_get_auth),
-	STD_IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext),
-	STD_IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext),
+	IW_HANDLER(SIOCSIWCOMMIT,	(iw_handler)orinoco_ioctl_commit),
+	IW_HANDLER(SIOCGIWNAME,		(iw_handler)cfg80211_wext_giwname),
+	IW_HANDLER(SIOCSIWFREQ,		(iw_handler)orinoco_ioctl_setfreq),
+	IW_HANDLER(SIOCGIWFREQ,		(iw_handler)orinoco_ioctl_getfreq),
+	IW_HANDLER(SIOCSIWMODE,		(iw_handler)cfg80211_wext_siwmode),
+	IW_HANDLER(SIOCGIWMODE,		(iw_handler)cfg80211_wext_giwmode),
+	IW_HANDLER(SIOCSIWSENS,		(iw_handler)orinoco_ioctl_setsens),
+	IW_HANDLER(SIOCGIWSENS,		(iw_handler)orinoco_ioctl_getsens),
+	IW_HANDLER(SIOCGIWRANGE,	(iw_handler)cfg80211_wext_giwrange),
+	IW_HANDLER(SIOCSIWSPY,		iw_handler_set_spy),
+	IW_HANDLER(SIOCGIWSPY,		iw_handler_get_spy),
+	IW_HANDLER(SIOCSIWTHRSPY,	iw_handler_set_thrspy),
+	IW_HANDLER(SIOCGIWTHRSPY,	iw_handler_get_thrspy),
+	IW_HANDLER(SIOCSIWAP,		(iw_handler)orinoco_ioctl_setwap),
+	IW_HANDLER(SIOCGIWAP,		(iw_handler)orinoco_ioctl_getwap),
+	IW_HANDLER(SIOCSIWSCAN,		(iw_handler)cfg80211_wext_siwscan),
+	IW_HANDLER(SIOCGIWSCAN,		(iw_handler)cfg80211_wext_giwscan),
+	IW_HANDLER(SIOCSIWESSID,	(iw_handler)orinoco_ioctl_setessid),
+	IW_HANDLER(SIOCGIWESSID,	(iw_handler)orinoco_ioctl_getessid),
+	IW_HANDLER(SIOCSIWRATE,		(iw_handler)orinoco_ioctl_setrate),
+	IW_HANDLER(SIOCGIWRATE,		(iw_handler)orinoco_ioctl_getrate),
+	IW_HANDLER(SIOCSIWRTS,		(iw_handler)orinoco_ioctl_setrts),
+	IW_HANDLER(SIOCGIWRTS,		(iw_handler)orinoco_ioctl_getrts),
+	IW_HANDLER(SIOCSIWFRAG,		(iw_handler)orinoco_ioctl_setfrag),
+	IW_HANDLER(SIOCGIWFRAG,		(iw_handler)orinoco_ioctl_getfrag),
+	IW_HANDLER(SIOCGIWRETRY,	(iw_handler)orinoco_ioctl_getretry),
+	IW_HANDLER(SIOCSIWENCODE,	(iw_handler)orinoco_ioctl_setiwencode),
+	IW_HANDLER(SIOCGIWENCODE,	(iw_handler)orinoco_ioctl_getiwencode),
+	IW_HANDLER(SIOCSIWPOWER,	(iw_handler)orinoco_ioctl_setpower),
+	IW_HANDLER(SIOCGIWPOWER,	(iw_handler)orinoco_ioctl_getpower),
+	IW_HANDLER(SIOCSIWGENIE,	orinoco_ioctl_set_genie),
+	IW_HANDLER(SIOCGIWGENIE,	orinoco_ioctl_get_genie),
+	IW_HANDLER(SIOCSIWMLME,		orinoco_ioctl_set_mlme),
+	IW_HANDLER(SIOCSIWAUTH,		orinoco_ioctl_set_auth),
+	IW_HANDLER(SIOCGIWAUTH,		orinoco_ioctl_get_auth),
+	IW_HANDLER(SIOCSIWENCODEEXT,	orinoco_ioctl_set_encodeext),
+	IW_HANDLER(SIOCGIWENCODEEXT,	orinoco_ioctl_get_encodeext),
 };
 
 
@@ -1553,15 +1551,15 @@ static const iw_handler orinoco_handler[] = {
    Added typecasting since we no longer use iwreq_data -- Moustafa
  */
 static const iw_handler orinoco_private_handler[] = {
-	[0] = (iw_handler) orinoco_ioctl_reset,
-	[1] = (iw_handler) orinoco_ioctl_reset,
-	[2] = (iw_handler) orinoco_ioctl_setport3,
-	[3] = (iw_handler) orinoco_ioctl_getport3,
-	[4] = (iw_handler) orinoco_ioctl_setpreamble,
-	[5] = (iw_handler) orinoco_ioctl_getpreamble,
-	[6] = (iw_handler) orinoco_ioctl_setibssport,
-	[7] = (iw_handler) orinoco_ioctl_getibssport,
-	[9] = (iw_handler) orinoco_ioctl_getrid,
+	[0] = (iw_handler)orinoco_ioctl_reset,
+	[1] = (iw_handler)orinoco_ioctl_reset,
+	[2] = (iw_handler)orinoco_ioctl_setport3,
+	[3] = (iw_handler)orinoco_ioctl_getport3,
+	[4] = (iw_handler)orinoco_ioctl_setpreamble,
+	[5] = (iw_handler)orinoco_ioctl_getpreamble,
+	[6] = (iw_handler)orinoco_ioctl_setibssport,
+	[7] = (iw_handler)orinoco_ioctl_getibssport,
+	[9] = (iw_handler)orinoco_ioctl_getrid,
 };
 
 const struct iw_handler_def orinoco_handler_def = {
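(Aside: the hunk above drops the driver-local STD_IW_HANDLER wrapper in favour of the shared IW_HANDLER macro; both styles build a sparse dispatch table via designated initializers indexed by the ioctl number. A self-contained sketch of that pattern, with hypothetical command values and handler names rather than the wireless-extensions definitions:)

/* Sketch of the designated-initializer dispatch pattern. */
#include <stddef.h>

typedef int (*handler_fn)(void *dev, int cmd);

#define CMD_FIRST	0x8B00			/* like SIOCIWFIRST */
#define CMD_IDX(cmd)	((cmd) - CMD_FIRST)	/* like IW_IOCTL_IDX() */
#define HANDLER(cmd, fn)	[CMD_IDX(cmd)] = (fn)

static int demo_commit(void *dev, int cmd)  { return 0; }
static int demo_getname(void *dev, int cmd) { return 0; }

static const handler_fn demo_table[] = {
	HANDLER(0x8B00, demo_commit),	/* SIOCSIWCOMMIT-like slot */
	HANDLER(0x8B01, demo_getname),	/* SIOCGIWNAME-like slot */
};

static int dispatch(void *dev, int cmd)
{
	size_t idx = CMD_IDX(cmd);

	/* holes in the table are NULL, so bounds- and NULL-check */
	if (idx >= sizeof(demo_table) / sizeof(demo_table[0]) || !demo_table[idx])
		return -1;
	return demo_table[idx](dev, cmd);
}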
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index a7cb9eb759a1..7bbd9d3bba60 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -546,6 +546,7 @@ struct ieee80211_hw *p54_init_common(size_t priv_data_len)
 		     IEEE80211_HW_SUPPORTS_PS |
 		     IEEE80211_HW_PS_NULLFUNC_STACK |
 		     IEEE80211_HW_BEACON_FILTER |
+		     IEEE80211_HW_REPORTS_TX_ACK_STATUS |
 		     IEEE80211_HW_NOISE_DBM;
 
 	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index 269fda362836..86f3e9ac4c7a 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -132,7 +132,7 @@ static int p54p_upload_firmware(struct ieee80211_hw *dev)
 
 static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
 	int ring_index, struct p54p_desc *ring, u32 ring_limit,
-	struct sk_buff **rx_buf)
+	struct sk_buff **rx_buf, u32 index)
 {
 	struct p54p_priv *priv = dev->priv;
 	struct p54p_ring_control *ring_control = priv->ring_control;
@@ -140,7 +140,7 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
 
 	idx = le32_to_cpu(ring_control->host_idx[ring_index]);
 	limit = idx;
-	limit -= le32_to_cpu(ring_control->device_idx[ring_index]);
+	limit -= le32_to_cpu(index);
 	limit = ring_limit - limit;
 
 	i = idx % ring_limit;
@@ -232,7 +232,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
 		i %= ring_limit;
 	}
 
-	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
+	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
 }
 
 static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
@@ -277,14 +277,6 @@ static void p54p_tasklet(unsigned long dev_id)
 	struct p54p_priv *priv = dev->priv;
 	struct p54p_ring_control *ring_control = priv->ring_control;
 
-	p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
-			   ARRAY_SIZE(ring_control->tx_mgmt),
-			   priv->tx_buf_mgmt);
-
-	p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
-			   ARRAY_SIZE(ring_control->tx_data),
-			   priv->tx_buf_data);
-
 	p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
 		ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
 
@@ -293,6 +285,14 @@ static void p54p_tasklet(unsigned long dev_id)
 
 	wmb();
 	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
+
+	p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
+			   ARRAY_SIZE(ring_control->tx_mgmt),
+			   priv->tx_buf_mgmt);
+
+	p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
+			   ARRAY_SIZE(ring_control->tx_data),
+			   priv->tx_buf_data);
 }
 
 static irqreturn_t p54p_interrupt(int irq, void *dev_id)
@@ -445,10 +445,10 @@ static int p54p_open(struct ieee80211_hw *dev)
 	priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;
 
 	p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
-		ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data);
+		ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);
 
 	p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
-		ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt);
+		ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);
 
 	P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
 	P54P_READ(ring_control_base);
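(Aside: the p54pci hunks above hand the refill path a snapshot of the device index instead of re-reading ring_control->device_idx inside the function, so the free-slot computation works from one consistent value even if the device advances the index concurrently. A freestanding sketch of the idea on a generic ring, with hypothetical names, not the driver's code:)

/* Sketch: compute free slots from a caller-supplied snapshot of the
 * device index, so the value cannot change mid-calculation. */
#include <stdint.h>

#define RING_LIMIT 32u

static unsigned int free_slots(uint32_t host_idx, uint32_t device_idx_snapshot)
{
	/* descriptors published by the host but not yet consumed */
	uint32_t in_flight = host_idx - device_idx_snapshot;

	return RING_LIMIT - in_flight;
}

static uint32_t refill(uint32_t host_idx, uint32_t device_idx_snapshot)
{
	unsigned int n = free_slots(host_idx, device_idx_snapshot);

	while (n--)
		host_idx++;	/* allocate a buffer and publish a descriptor here */
	return host_idx;
}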
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index 66057999a93c..2ceff5480355 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -38,7 +38,7 @@ static void p54_dump_tx_queue(struct p54_common *priv)
 	u32 largest_hole = 0, free;
 
 	spin_lock_irqsave(&priv->tx_queue.lock, flags);
-	printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) --- \n",
+	printk(KERN_DEBUG "%s: / --- tx queue dump (%d entries) ---\n",
 	       wiphy_name(priv->hw->wiphy), skb_queue_len(&priv->tx_queue));
 
 	prev_addr = priv->rx_start;
diff --git a/drivers/net/wireless/prism54/islpci_dev.c b/drivers/net/wireless/prism54/islpci_dev.c
index 689d59a13d5b..10d91afefa33 100644
--- a/drivers/net/wireless/prism54/islpci_dev.c
+++ b/drivers/net/wireless/prism54/islpci_dev.c
@@ -228,14 +228,14 @@ islpci_interrupt(int irq, void *config)
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
 	DEBUG(SHOW_FUNCTION_CALLS,
-	      "IRQ: Identification register 0x%p 0x%x \n", device, reg);
+	      "IRQ: Identification register 0x%p 0x%x\n", device, reg);
 #endif
 
 	/* check for each bit in the register separately */
 	if (reg & ISL38XX_INT_IDENT_UPDATE) {
 #if VERBOSE > SHOW_ERROR_MESSAGES
 		/* Queue has been updated */
-		DEBUG(SHOW_TRACING, "IRQ: Update flag \n");
+		DEBUG(SHOW_TRACING, "IRQ: Update flag\n");
 
 		DEBUG(SHOW_QUEUE_INDEXES,
 		      "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
@@ -301,7 +301,7 @@ islpci_interrupt(int irq, void *config)
 		    ISL38XX_CB_RX_DATA_LQ) != 0) {
 #if VERBOSE > SHOW_ERROR_MESSAGES
 			DEBUG(SHOW_TRACING,
-			      "Received frame in Data Low Queue \n");
+			      "Received frame in Data Low Queue\n");
 #endif
 			islpci_eth_receive(priv);
 		}
@@ -326,7 +326,7 @@ islpci_interrupt(int irq, void *config)
 			/* Device has been initialized */
 #if VERBOSE > SHOW_ERROR_MESSAGES
 			DEBUG(SHOW_TRACING,
-			      "IRQ: Init flag, device initialized \n");
+			      "IRQ: Init flag, device initialized\n");
 #endif
 			wake_up(&priv->reset_done);
 		}
@@ -334,7 +334,7 @@ islpci_interrupt(int irq, void *config)
 		if (reg & ISL38XX_INT_IDENT_SLEEP) {
 			/* Device intends to move to powersave state */
 #if VERBOSE > SHOW_ERROR_MESSAGES
-			DEBUG(SHOW_TRACING, "IRQ: Sleep flag \n");
+			DEBUG(SHOW_TRACING, "IRQ: Sleep flag\n");
 #endif
 			isl38xx_handle_sleep_request(priv->control_block,
 						     &powerstate,
@@ -344,7 +344,7 @@ islpci_interrupt(int irq, void *config)
 		if (reg & ISL38XX_INT_IDENT_WAKEUP) {
 			/* Device has been woken up to active state */
 #if VERBOSE > SHOW_ERROR_MESSAGES
-			DEBUG(SHOW_TRACING, "IRQ: Wakeup flag \n");
+			DEBUG(SHOW_TRACING, "IRQ: Wakeup flag\n");
 #endif
 
 			isl38xx_handle_wakeup(priv->control_block,
@@ -635,7 +635,7 @@ islpci_alloc_memory(islpci_private *priv)
 	     ioremap(pci_resource_start(priv->pdev, 0),
 		     ISL38XX_PCI_MEM_SIZE))) {
 		/* error in remapping the PCI device memory address range */
-		printk(KERN_ERR "PCI memory remapping failed \n");
+		printk(KERN_ERR "PCI memory remapping failed\n");
 		return -1;
 	}
 
@@ -902,7 +902,7 @@ islpci_setup(struct pci_dev *pdev)
 
 	if (register_netdev(ndev)) {
 		DEBUG(SHOW_ERROR_MESSAGES,
-		      "ERROR: register_netdev() failed \n");
+		      "ERROR: register_netdev() failed\n");
 		goto do_islpci_free_memory;
 	}
 
diff --git a/drivers/net/wireless/prism54/islpci_eth.c b/drivers/net/wireless/prism54/islpci_eth.c
index ac99eaaeabce..64585da8a96c 100644
--- a/drivers/net/wireless/prism54/islpci_eth.c
+++ b/drivers/net/wireless/prism54/islpci_eth.c
@@ -90,7 +90,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 curr_frag;
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
-	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit \n");
+	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
 #endif
 
 	/* lock the driver code */
@@ -141,7 +141,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
 		}
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
-		DEBUG(SHOW_TRACING, "memmove %p %p %i \n", skb->data,
+		DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
 		      src, skb->len);
 #endif
 	} else {
@@ -320,7 +320,7 @@ islpci_eth_receive(islpci_private *priv)
 	int discard = 0;
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
-	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive \n");
+	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_receive\n");
 #endif
 
 	/* the device has written an Ethernet frame in the data area
@@ -432,7 +432,7 @@ islpci_eth_receive(islpci_private *priv)
 		skb = dev_alloc_skb(MAX_FRAGMENT_SIZE_RX + 2);
 		if (unlikely(skb == NULL)) {
 			/* error allocating an sk_buff structure elements */
-			DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb \n");
+			DEBUG(SHOW_ERROR_MESSAGES, "Error allocating skb\n");
 			break;
 		}
 		skb_reserve(skb, (4 - (long) skb->data) & 0x03);
diff --git a/drivers/net/wireless/prism54/islpci_mgt.c b/drivers/net/wireless/prism54/islpci_mgt.c
index adb289723a96..a5224f6160e4 100644
--- a/drivers/net/wireless/prism54/islpci_mgt.c
+++ b/drivers/net/wireless/prism54/islpci_mgt.c
@@ -114,7 +114,7 @@ islpci_mgmt_rx_fill(struct net_device *ndev)
 	u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]);
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
-	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill \n");
+	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgmt_rx_fill\n");
 #endif
 
 	while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) {
@@ -212,7 +212,7 @@ islpci_mgt_transmit(struct net_device *ndev, int operation, unsigned long oid,
 	{
 		pimfor_header_t *h = buf.mem;
 		DEBUG(SHOW_PIMFOR_FRAMES,
-		      "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x \n",
+		      "PIMFOR: op %i, oid 0x%08lx, device %i, flags 0x%x length 0x%x\n",
 		      h->operation, oid, h->device_id, h->flags, length);
 
 		/* display the buffer contents for debugging */
@@ -280,7 +280,7 @@ islpci_mgt_receive(struct net_device *ndev)
 	u32 curr_frag;
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
-	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive \n");
+	DEBUG(SHOW_FUNCTION_CALLS, "islpci_mgt_receive\n");
 #endif
 
 	/* Only once per interrupt, determine fragment range to
@@ -339,7 +339,7 @@ islpci_mgt_receive(struct net_device *ndev)
 
 #if VERBOSE > SHOW_ERROR_MESSAGES
 		DEBUG(SHOW_PIMFOR_FRAMES,
-		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x \n",
+		      "PIMFOR: op %i, oid 0x%08x, device %i, flags 0x%x length 0x%x\n",
 		      header->operation, header->oid, header->device_id,
 		      header->flags, header->length);
 
diff --git a/drivers/net/wireless/prism54/oid_mgt.c b/drivers/net/wireless/prism54/oid_mgt.c
index d66933d70fb9..9b796cae4afe 100644
--- a/drivers/net/wireless/prism54/oid_mgt.c
+++ b/drivers/net/wireless/prism54/oid_mgt.c
@@ -820,7 +820,7 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
 		k = snprintf(str, PRIV_STR_SIZE, "nr=%u\n", list->nr);
 		for (i = 0; i < list->nr; i++)
 			k += snprintf(str + k, PRIV_STR_SIZE - k,
-				      "bss[%u] : \nage=%u\nchannel=%u\n"
+				      "bss[%u] :\nage=%u\nchannel=%u\n"
 				      "capinfo=0x%X\nrates=0x%X\n"
 				      "basic_rates=0x%X\n",
 				      i, list->bsslist[i].age,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 11865ea21875..d9c45bfcee61 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -555,7 +555,7 @@ static int ray_init(struct net_device *dev)
 	local->fw_ver = local->startup_res.firmware_version[0];
 	local->fw_bld = local->startup_res.firmware_version[1];
 	local->fw_var = local->startup_res.firmware_version[2];
-	dev_dbg(&link->dev, "ray_init firmware version %d.%d \n", local->fw_ver,
+	dev_dbg(&link->dev, "ray_init firmware version %d.%d\n", local->fw_ver,
 		local->fw_bld);
 
 	local->tib_length = 0x20;
@@ -1112,10 +1112,10 @@ static const struct ethtool_ops netdev_ethtool_ops = {
 /*
  * Wireless Handler : get protocol name
  */
-static int ray_get_name(struct net_device *dev,
-			struct iw_request_info *info, char *cwrq, char *extra)
+static int ray_get_name(struct net_device *dev, struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
 {
-	strcpy(cwrq, "IEEE 802.11-FH");
+	strcpy(wrqu->name, "IEEE 802.11-FH");
 	return 0;
 }
 
@@ -1123,9 +1123,8 @@ static int ray_get_name(struct net_device *dev,
 /*
  * Wireless Handler : set frequency
  */
-static int ray_set_freq(struct net_device *dev,
-			struct iw_request_info *info,
-			struct iw_freq *fwrq, char *extra)
+static int ray_set_freq(struct net_device *dev, struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 	int err = -EINPROGRESS;	/* Call commit handler */
@@ -1135,10 +1134,10 @@ static int ray_set_freq(struct net_device *dev,
 		return -EBUSY;
 
 	/* Setting by channel number */
-	if ((fwrq->m > USA_HOP_MOD) || (fwrq->e > 0))
+	if ((wrqu->freq.m > USA_HOP_MOD) || (wrqu->freq.e > 0))
 		err = -EOPNOTSUPP;
 	else
-		local->sparm.b5.a_hop_pattern = fwrq->m;
+		local->sparm.b5.a_hop_pattern = wrqu->freq.m;
 
 	return err;
 }
@@ -1147,14 +1146,13 @@ static int ray_set_freq(struct net_device *dev,
 /*
  * Wireless Handler : get frequency
  */
-static int ray_get_freq(struct net_device *dev,
-			struct iw_request_info *info,
-			struct iw_freq *fwrq, char *extra)
+static int ray_get_freq(struct net_device *dev, struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 
-	fwrq->m = local->sparm.b5.a_hop_pattern;
-	fwrq->e = 0;
+	wrqu->freq.m = local->sparm.b5.a_hop_pattern;
+	wrqu->freq.e = 0;
 	return 0;
 }
 
@@ -1162,9 +1160,8 @@ static int ray_get_freq(struct net_device *dev,
 /*
  * Wireless Handler : set ESSID
  */
-static int ray_set_essid(struct net_device *dev,
-			 struct iw_request_info *info,
-			 struct iw_point *dwrq, char *extra)
+static int ray_set_essid(struct net_device *dev, struct iw_request_info *info,
+			 union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 
@@ -1173,19 +1170,17 @@ static int ray_set_essid(struct net_device *dev,
 		return -EBUSY;
 
 	/* Check if we asked for `any' */
-	if (dwrq->flags == 0) {
+	if (wrqu->essid.flags == 0)
 		/* Corey : can you do that ? */
 		return -EOPNOTSUPP;
-	} else {
-		/* Check the size of the string */
-		if (dwrq->length > IW_ESSID_MAX_SIZE) {
-			return -E2BIG;
-		}
 
-		/* Set the ESSID in the card */
-		memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
-		memcpy(local->sparm.b5.a_current_ess_id, extra, dwrq->length);
-	}
+	/* Check the size of the string */
+	if (wrqu->essid.length > IW_ESSID_MAX_SIZE)
+		return -E2BIG;
+
+	/* Set the ESSID in the card */
+	memset(local->sparm.b5.a_current_ess_id, 0, IW_ESSID_MAX_SIZE);
+	memcpy(local->sparm.b5.a_current_ess_id, extra, wrqu->essid.length);
 
 	return -EINPROGRESS;	/* Call commit handler */
 }
@@ -1194,9 +1189,8 @@ static int ray_set_essid(struct net_device *dev,
 /*
  * Wireless Handler : get ESSID
  */
-static int ray_get_essid(struct net_device *dev,
-			 struct iw_request_info *info,
-			 struct iw_point *dwrq, char *extra)
+static int ray_get_essid(struct net_device *dev, struct iw_request_info *info,
+			 union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 
@@ -1204,8 +1198,8 @@ static int ray_get_essid(struct net_device *dev,
 	memcpy(extra, local->sparm.b5.a_current_ess_id, IW_ESSID_MAX_SIZE);
 
 	/* Push it out ! */
-	dwrq->length = strlen(extra);
-	dwrq->flags = 1;	/* active */
+	wrqu->essid.length = strlen(extra);
+	wrqu->essid.flags = 1;	/* active */
 
 	return 0;
 }
@@ -1214,14 +1208,13 @@ static int ray_get_essid(struct net_device *dev,
 /*
  * Wireless Handler : get AP address
  */
-static int ray_get_wap(struct net_device *dev,
-		       struct iw_request_info *info,
-		       struct sockaddr *awrq, char *extra)
+static int ray_get_wap(struct net_device *dev, struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 
-	memcpy(awrq->sa_data, local->bss_id, ETH_ALEN);
-	awrq->sa_family = ARPHRD_ETHER;
+	memcpy(wrqu->ap_addr.sa_data, local->bss_id, ETH_ALEN);
+	wrqu->ap_addr.sa_family = ARPHRD_ETHER;
 
 	return 0;
 }
@@ -1230,9 +1223,8 @@ static int ray_get_wap(struct net_device *dev,
 /*
  * Wireless Handler : set Bit-Rate
  */
-static int ray_set_rate(struct net_device *dev,
-			struct iw_request_info *info,
-			struct iw_param *vwrq, char *extra)
+static int ray_set_rate(struct net_device *dev, struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 
@@ -1241,15 +1233,15 @@ static int ray_set_rate(struct net_device *dev,
 		return -EBUSY;
 
 	/* Check if rate is in range */
-	if ((vwrq->value != 1000000) && (vwrq->value != 2000000))
+	if ((wrqu->bitrate.value != 1000000) && (wrqu->bitrate.value != 2000000))
 		return -EINVAL;
 
 	/* Hack for 1.5 Mb/s instead of 2 Mb/s */
 	if ((local->fw_ver == 0x55) &&	/* Please check */
-	    (vwrq->value == 2000000))
+	    (wrqu->bitrate.value == 2000000))
 		local->net_default_tx_rate = 3;
 	else
-		local->net_default_tx_rate = vwrq->value / 500000;
+		local->net_default_tx_rate = wrqu->bitrate.value / 500000;
 
 	return 0;
 }
@@ -1258,17 +1250,16 @@ static int ray_set_rate(struct net_device *dev,
 /*
  * Wireless Handler : get Bit-Rate
  */
-static int ray_get_rate(struct net_device *dev,
-			struct iw_request_info *info,
-			struct iw_param *vwrq, char *extra)
+static int ray_get_rate(struct net_device *dev, struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 
 	if (local->net_default_tx_rate == 3)
-		vwrq->value = 2000000;	/* Hum... */
+		wrqu->bitrate.value = 2000000;	/* Hum... */
 	else
-		vwrq->value = local->net_default_tx_rate * 500000;
-	vwrq->fixed = 0;	/* We are in auto mode */
+		wrqu->bitrate.value = local->net_default_tx_rate * 500000;
+	wrqu->bitrate.fixed = 0;	/* We are in auto mode */
 
 	return 0;
 }
@@ -1277,19 +1268,18 @@ static int ray_get_rate(struct net_device *dev,
 /*
  * Wireless Handler : set RTS threshold
  */
-static int ray_set_rts(struct net_device *dev,
-		       struct iw_request_info *info,
-		       struct iw_param *vwrq, char *extra)
+static int ray_set_rts(struct net_device *dev, struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
-	int rthr = vwrq->value;
+	int rthr = wrqu->rts.value;
 
 	/* Reject if card is already initialised */
 	if (local->card_status != CARD_AWAITING_PARAM)
 		return -EBUSY;
 
 	/* if(wrq->u.rts.fixed == 0) we should complain */
-	if (vwrq->disabled)
+	if (wrqu->rts.disabled)
 		rthr = 32767;
 	else {
 		if ((rthr < 0) || (rthr > 2347)) /* What's the max packet size ??? */
@@ -1305,16 +1295,15 @@ static int ray_set_rts(struct net_device *dev,
 /*
  * Wireless Handler : get RTS threshold
  */
-static int ray_get_rts(struct net_device *dev,
-		       struct iw_request_info *info,
-		       struct iw_param *vwrq, char *extra)
+static int ray_get_rts(struct net_device *dev, struct iw_request_info *info,
+		       union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 
-	vwrq->value = (local->sparm.b5.a_rts_threshold[0] << 8)
+	wrqu->rts.value = (local->sparm.b5.a_rts_threshold[0] << 8)
 	    + local->sparm.b5.a_rts_threshold[1];
-	vwrq->disabled = (vwrq->value == 32767);
-	vwrq->fixed = 1;
+	wrqu->rts.disabled = (wrqu->rts.value == 32767);
+	wrqu->rts.fixed = 1;
 
 	return 0;
 }
@@ -1323,19 +1312,18 @@ static int ray_get_rts(struct net_device *dev,
 /*
  * Wireless Handler : set Fragmentation threshold
  */
-static int ray_set_frag(struct net_device *dev,
-			struct iw_request_info *info,
-			struct iw_param *vwrq, char *extra)
+static int ray_set_frag(struct net_device *dev, struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
-	int fthr = vwrq->value;
+	int fthr = wrqu->frag.value;
 
 	/* Reject if card is already initialised */
 	if (local->card_status != CARD_AWAITING_PARAM)
 		return -EBUSY;
 
 	/* if(wrq->u.frag.fixed == 0) should complain */
-	if (vwrq->disabled)
+	if (wrqu->frag.disabled)
 		fthr = 32767;
 	else {
 		if ((fthr < 256) || (fthr > 2347)) /* To check out ! */
@@ -1351,16 +1339,15 @@ static int ray_set_frag(struct net_device *dev,
 /*
  * Wireless Handler : get Fragmentation threshold
  */
-static int ray_get_frag(struct net_device *dev,
-			struct iw_request_info *info,
-			struct iw_param *vwrq, char *extra)
+static int ray_get_frag(struct net_device *dev, struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 
-	vwrq->value = (local->sparm.b5.a_frag_threshold[0] << 8)
+	wrqu->frag.value = (local->sparm.b5.a_frag_threshold[0] << 8)
 	    + local->sparm.b5.a_frag_threshold[1];
-	vwrq->disabled = (vwrq->value == 32767);
-	vwrq->fixed = 1;
+	wrqu->frag.disabled = (wrqu->frag.value == 32767);
+	wrqu->frag.fixed = 1;
 
 	return 0;
 }
@@ -1369,8 +1356,8 @@ static int ray_get_frag(struct net_device *dev,
 /*
  * Wireless Handler : set Mode of Operation
  */
-static int ray_set_mode(struct net_device *dev,
-			struct iw_request_info *info, __u32 *uwrq, char *extra)
+static int ray_set_mode(struct net_device *dev, struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 	int err = -EINPROGRESS;	/* Call commit handler */
@@ -1380,7 +1367,7 @@ static int ray_set_mode(struct net_device *dev,
 	if (local->card_status != CARD_AWAITING_PARAM)
 		return -EBUSY;
 
-	switch (*uwrq) {
+	switch (wrqu->mode) {
 	case IW_MODE_ADHOC:
 		card_mode = 0;
 		/* Fall through */
@@ -1398,15 +1385,15 @@ static int ray_set_mode(struct net_device *dev,
 /*
  * Wireless Handler : get Mode of Operation
  */
-static int ray_get_mode(struct net_device *dev,
-			struct iw_request_info *info, __u32 *uwrq, char *extra)
+static int ray_get_mode(struct net_device *dev, struct iw_request_info *info,
+			union iwreq_data *wrqu, char *extra)
 {
 	ray_dev_t *local = netdev_priv(dev);
 
 	if (local->sparm.b5.a_network_type)
-		*uwrq = IW_MODE_INFRA;
+		wrqu->mode = IW_MODE_INFRA;
 	else
-		*uwrq = IW_MODE_ADHOC;
+		wrqu->mode = IW_MODE_ADHOC;
 
 	return 0;
 }
@@ -1415,16 +1402,15 @@ static int ray_get_mode(struct net_device *dev,
 /*
  * Wireless Handler : get range info
  */
-static int ray_get_range(struct net_device *dev,
-			 struct iw_request_info *info,
-			 struct iw_point *dwrq, char *extra)
+static int ray_get_range(struct net_device *dev, struct iw_request_info *info,
+			 union iwreq_data *wrqu, char *extra)
 {
 	struct iw_range *range = (struct iw_range *)extra;
 
-	memset((char *)range, 0, sizeof(struct iw_range));
+	memset(range, 0, sizeof(struct iw_range));
 
 	/* Set the length (very important for backward compatibility) */
-	dwrq->length = sizeof(struct iw_range);
+	wrqu->data.length = sizeof(struct iw_range);
 
 	/* Set the Wireless Extension versions */
 	range->we_version_compiled = WIRELESS_EXT;
@@ -1447,8 +1433,7 @@ static int ray_get_range(struct net_device *dev,
 /*
  * Wireless Private Handler : set framing mode
  */
-static int ray_set_framing(struct net_device *dev,
-			   struct iw_request_info *info,
+static int ray_set_framing(struct net_device *dev, struct iw_request_info *info,
 			   union iwreq_data *wrqu, char *extra)
 {
 	translate = *(extra);	/* Set framing mode */
@@ -1460,8 +1445,7 @@ static int ray_set_framing(struct net_device *dev,
 /*
  * Wireless Private Handler : get framing mode
  */
-static int ray_get_framing(struct net_device *dev,
-			   struct iw_request_info *info,
+static int ray_get_framing(struct net_device *dev, struct iw_request_info *info,
 			   union iwreq_data *wrqu, char *extra)
 {
 	*(extra) = translate;
@@ -1473,8 +1457,7 @@ static int ray_get_framing(struct net_device *dev,
 /*
  * Wireless Private Handler : get country
  */
-static int ray_get_country(struct net_device *dev,
-			   struct iw_request_info *info,
+static int ray_get_country(struct net_device *dev, struct iw_request_info *info,
 			   union iwreq_data *wrqu, char *extra)
 {
 	*(extra) = country;
@@ -1486,10 +1469,9 @@ static int ray_get_country(struct net_device *dev,
 /*
  * Commit handler : called after a bunch of SET operations
  */
-static int ray_commit(struct net_device *dev, struct iw_request_info *info,	/* NULL */
-		      void *zwrq,	/* NULL */
-		      char *extra)
-{				/* NULL */
+static int ray_commit(struct net_device *dev, struct iw_request_info *info,
+		      union iwreq_data *wrqu, char *extra)
+{
 	return 0;
 }
 
@@ -1530,28 +1512,28 @@ static iw_stats *ray_get_wireless_stats(struct net_device *dev)
  */
 
 static const iw_handler ray_handler[] = {
-	[SIOCSIWCOMMIT - SIOCIWFIRST] = (iw_handler) ray_commit,
-	[SIOCGIWNAME - SIOCIWFIRST] = (iw_handler) ray_get_name,
-	[SIOCSIWFREQ - SIOCIWFIRST] = (iw_handler) ray_set_freq,
-	[SIOCGIWFREQ - SIOCIWFIRST] = (iw_handler) ray_get_freq,
-	[SIOCSIWMODE - SIOCIWFIRST] = (iw_handler) ray_set_mode,
-	[SIOCGIWMODE - SIOCIWFIRST] = (iw_handler) ray_get_mode,
-	[SIOCGIWRANGE - SIOCIWFIRST] = (iw_handler) ray_get_range,
+	IW_HANDLER(SIOCSIWCOMMIT, ray_commit),
+	IW_HANDLER(SIOCGIWNAME, ray_get_name),
+	IW_HANDLER(SIOCSIWFREQ, ray_set_freq),
+	IW_HANDLER(SIOCGIWFREQ, ray_get_freq),
+	IW_HANDLER(SIOCSIWMODE, ray_set_mode),
+	IW_HANDLER(SIOCGIWMODE, ray_get_mode),
+	IW_HANDLER(SIOCGIWRANGE, ray_get_range),
 #ifdef WIRELESS_SPY
-	[SIOCSIWSPY - SIOCIWFIRST] = (iw_handler) iw_handler_set_spy,
-	[SIOCGIWSPY - SIOCIWFIRST] = (iw_handler) iw_handler_get_spy,
-	[SIOCSIWTHRSPY - SIOCIWFIRST] = (iw_handler) iw_handler_set_thrspy,
-	[SIOCGIWTHRSPY - SIOCIWFIRST] = (iw_handler) iw_handler_get_thrspy,
+	IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
+	IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
+	IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
+	IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
 #endif /* WIRELESS_SPY */
-	[SIOCGIWAP - SIOCIWFIRST] = (iw_handler) ray_get_wap,
-	[SIOCSIWESSID - SIOCIWFIRST] = (iw_handler) ray_set_essid,
-	[SIOCGIWESSID - SIOCIWFIRST] = (iw_handler) ray_get_essid,
-	[SIOCSIWRATE - SIOCIWFIRST] = (iw_handler) ray_set_rate,
-	[SIOCGIWRATE - SIOCIWFIRST] = (iw_handler) ray_get_rate,
-	[SIOCSIWRTS - SIOCIWFIRST] = (iw_handler) ray_set_rts,
-	[SIOCGIWRTS - SIOCIWFIRST] = (iw_handler) ray_get_rts,
-	[SIOCSIWFRAG - SIOCIWFIRST] = (iw_handler) ray_set_frag,
-	[SIOCGIWFRAG - SIOCIWFIRST] = (iw_handler) ray_get_frag,
+	IW_HANDLER(SIOCGIWAP, ray_get_wap),
+	IW_HANDLER(SIOCSIWESSID, ray_set_essid),
+	IW_HANDLER(SIOCGIWESSID, ray_get_essid),
+	IW_HANDLER(SIOCSIWRATE, ray_set_rate),
+	IW_HANDLER(SIOCGIWRATE, ray_get_rate),
+	IW_HANDLER(SIOCSIWRTS, ray_set_rts),
+	IW_HANDLER(SIOCGIWRTS, ray_get_rts),
+	IW_HANDLER(SIOCSIWFRAG, ray_set_frag),
+	IW_HANDLER(SIOCGIWFRAG, ray_get_frag),
 };
 
 #define SIOCSIPFRAMING	SIOCIWFIRSTPRIV	/* Set framing mode */
@@ -1559,9 +1541,9 @@ static const iw_handler ray_handler[] = {
 #define SIOCGIPCOUNTRY	SIOCIWFIRSTPRIV + 3	/* Get country code */
 
 static const iw_handler ray_private_handler[] = {
-	[0] = (iw_handler) ray_set_framing,
-	[1] = (iw_handler) ray_get_framing,
-	[3] = (iw_handler) ray_get_country,
+	[0] = ray_set_framing,
+	[1] = ray_get_framing,
+	[3] = ray_get_country,
 };
 
 static const struct iw_priv_args ray_private_args[] = {
@@ -1892,17 +1874,17 @@ static void ray_update_multi_list(struct net_device *dev, int all)
 		writeb(0xff, &pccs->var);
 		local->num_multi = 0xff;
 	} else {
-		struct dev_mc_list *dmi;
+		struct netdev_hw_addr *ha;
 		int i = 0;
 
 		/* Copy the kernel's list of MC addresses to card */
-		netdev_for_each_mc_addr(dmi, dev) {
-			memcpy_toio(p, dmi->dmi_addr, ETH_ALEN);
+		netdev_for_each_mc_addr(ha, dev) {
+			memcpy_toio(p, ha->addr, ETH_ALEN);
 			dev_dbg(&link->dev,
 				"ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
-				dmi->dmi_addr[0], dmi->dmi_addr[1],
-				dmi->dmi_addr[2], dmi->dmi_addr[3],
-				dmi->dmi_addr[4], dmi->dmi_addr[5]);
+				ha->addr[0], ha->addr[1],
+				ha->addr[2], ha->addr[3],
+				ha->addr[4], ha->addr[5]);
 			p += ETH_ALEN;
 			i++;
 		}
@@ -2251,7 +2233,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
 			    (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
 			     FCS_LEN)) {
 				pr_debug(
-					"ray_cs invalid packet length %d received \n",
+					"ray_cs invalid packet length %d received\n",
 					rx_len);
 				return;
 			}
@@ -2262,7 +2244,7 @@ static void rx_data(struct net_device *dev, struct rcs __iomem *prcs,
 			    (dev->mtu + RX_MAC_HEADER_LENGTH + ETH_HLEN +
 			     FCS_LEN)) {
 				pr_debug(
-					"ray_cs invalid packet length %d received \n",
+					"ray_cs invalid packet length %d received\n",
 					rx_len);
 				return;
 			}
@@ -2770,11 +2752,11 @@ static int ray_cs_proc_show(struct seq_file *m, void *v)
 			seq_printf(m, "Hop dwell = %d Kus\n",
 				   pfh->dwell_time[0] +
 				   256 * pfh->dwell_time[1]);
-			seq_printf(m, "Hop set = %d \n",
+			seq_printf(m, "Hop set = %d\n",
 				   pfh->hop_set);
-			seq_printf(m, "Hop pattern = %d \n",
+			seq_printf(m, "Hop pattern = %d\n",
 				   pfh->hop_pattern);
-			seq_printf(m, "Hop index = %d \n",
+			seq_printf(m, "Hop index = %d\n",
 				   pfh->hop_index);
 			p += p[1] + 2;
 		} else {
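(Aside: the ray_cs conversion above collapses the old per-type handler arguments, struct iw_freq *, struct iw_point *, __u32 *, and so on, into the single union iwreq_data * signature, so every handler fits the common iw_handler prototype without casts in the dispatch table. A toy model of the shape, using an illustrative union rather than the kernel's union iwreq_data:)

/* Toy model of the single-signature handler style adopted above. */
struct toy_freq  { int m, e; };
struct toy_essid { int flags, length; };

union toy_req {
	struct toy_freq  freq;
	struct toy_essid essid;
	unsigned int     mode;
};

typedef int (*toy_handler)(union toy_req *req, char *extra);

static int toy_get_freq(union toy_req *req, char *extra)
{
	req->freq.m = 7;	/* e.g. a hop pattern */
	req->freq.e = 0;
	return 0;
}

static int toy_get_mode(union toy_req *req, char *extra)
{
	req->mode = 1;		/* e.g. infrastructure */
	return 0;
}

/* Both functions now share one type, so one table can hold them. */
static const toy_handler toy_table[] = { toy_get_freq, toy_get_mode };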
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 1de5b22d3efe..babdcdf6d71d 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -118,6 +118,7 @@ MODULE_PARM_DESC(workaround_interval,
 #define OID_802_11_ADD_KEY			cpu_to_le32(0x0d01011d)
 #define OID_802_11_REMOVE_KEY			cpu_to_le32(0x0d01011e)
 #define OID_802_11_ASSOCIATION_INFORMATION	cpu_to_le32(0x0d01011f)
+#define OID_802_11_CAPABILITY			cpu_to_le32(0x0d010122)
 #define OID_802_11_PMKID			cpu_to_le32(0x0d010123)
 #define OID_802_11_NETWORK_TYPES_SUPPORTED	cpu_to_le32(0x0d010203)
 #define OID_802_11_NETWORK_TYPE_IN_USE		cpu_to_le32(0x0d010204)
@@ -359,6 +360,30 @@ struct ndis_80211_assoc_info {
 	__le32 offset_resp_ies;
 } __attribute__((packed));
 
+struct ndis_80211_auth_encr_pair {
+	__le32 auth_mode;
+	__le32 encr_mode;
+} __attribute__((packed));
+
+struct ndis_80211_capability {
+	__le32 length;
+	__le32 version;
+	__le32 num_pmkids;
+	__le32 num_auth_encr_pair;
+	struct ndis_80211_auth_encr_pair auth_encr_pair[0];
+} __attribute__((packed));
+
+struct ndis_80211_bssid_info {
+	u8 bssid[6];
+	u8 pmkid[16];
+};
+
+struct ndis_80211_pmkid {
+	__le32 length;
+	__le32 bssid_info_count;
+	struct ndis_80211_bssid_info bssid_info[0];
+};
+
 /*
  * private data
  */
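(Aside: the new NDIS structures above end in zero-length arrays, auth_encr_pair[0] and bssid_info[0], the GNU C spelling of a variable-length trailer; the allocation is sized as the fixed header plus n trailing records. A freestanding sketch of sizing and filling such a structure, with hypothetical names and the standard-C flexible-array spelling:)

/* Sketch: allocate a header-plus-trailer structure like the PMKID
 * list above; total size = fixed header + n variable records. */
#include <stdlib.h>

struct rec  { unsigned char bssid[6]; unsigned char pmkid[16]; };
struct list { unsigned int length; unsigned int count; struct rec recs[]; };

static struct list *alloc_list(unsigned int n)
{
	size_t sz = sizeof(struct list) + n * sizeof(struct rec);
	struct list *l = calloc(1, sz);

	if (!l)
		return NULL;
	l->length = (unsigned int)sz;	/* total byte count, NDIS-style */
	l->count = n;			/* number of trailing records */
	return l;
}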
@@ -477,13 +502,7 @@ struct rndis_wlan_private {
 	/* encryption stuff */
 	int encr_tx_key_index;
 	struct rndis_wlan_encr_key encr_keys[4];
-	enum nl80211_auth_type wpa_auth_type;
 	int wpa_version;
-	int wpa_keymgmt;
-	int wpa_ie_len;
-	u8 *wpa_ie;
-	int wpa_cipher_pair;
-	int wpa_cipher_group;
 
 	u8 command_buffer[COMMAND_BUFFER_SIZE];
 };
@@ -535,6 +554,14 @@ static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
 static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
 			       int idx, u8 *mac, struct station_info *sinfo);
 
+static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+			   struct cfg80211_pmksa *pmksa);
+
+static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
+			   struct cfg80211_pmksa *pmksa);
+
+static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev);
+
 static struct cfg80211_ops rndis_config_ops = {
 	.change_virtual_intf = rndis_change_virtual_intf,
 	.scan = rndis_scan,
@@ -551,6 +578,9 @@ static struct cfg80211_ops rndis_config_ops = {
 	.set_default_key = rndis_set_default_key,
 	.get_station = rndis_get_station,
 	.dump_station = rndis_dump_station,
+	.set_pmksa = rndis_set_pmksa,
+	.del_pmksa = rndis_del_pmksa,
+	.flush_pmksa = rndis_flush_pmksa,
 };
 
 static void *rndis_wiphy_privid = &rndis_wiphy_privid;
@@ -705,6 +735,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
 		struct rndis_query_c *get_c;
 	} u;
 	int ret, buflen;
+	int resplen, respoffs, copylen;
 
 	buflen = *len + sizeof(*u.get);
 	if (buflen < CONTROL_BUFFER_SIZE)
@@ -734,11 +765,34 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
 			le32_to_cpu(u.get_c->status));
 
 	if (ret == 0) {
-		memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
+		resplen = le32_to_cpu(u.get_c->len);
+		respoffs = le32_to_cpu(u.get_c->offset) + 8;
 
-		ret = le32_to_cpu(u.get_c->len);
-		if (ret > *len)
-			*len = ret;
+		if (respoffs > buflen) {
+			/* Device returned data offset outside buffer, error. */
+			netdev_dbg(dev->net, "%s(%s): received invalid "
+				"data offset: %d > %d\n", __func__,
+				oid_to_string(oid), respoffs, buflen);
+
+			ret = -EINVAL;
+			goto exit_unlock;
+		}
+
+		if ((resplen + respoffs) > buflen) {
+			/* Device would have returned more data if buffer would
+			 * have been big enough. Copy just the bits that we got.
+			 */
+			copylen = buflen - respoffs;
+		} else {
+			copylen = resplen;
+		}
+
+		if (copylen > *len)
+			copylen = *len;
+
+		memcpy(data, u.buf + respoffs, copylen);
+
+		*len = resplen;
 
 		ret = rndis_error_status(u.get_c->status);
 		if (ret < 0)
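(Aside: the rewritten rndis_query_oid above no longer trusts the device-reported offset/length pair; it clamps the copy to both the response buffer and the caller's buffer before the memcpy. The same validation pattern in isolation, with hypothetical names:)

/* Sketch of the clamp-before-copy pattern introduced above: never let a
 * device-supplied offset/length drive memcpy() past either buffer. */
#include <string.h>

static int copy_response(char *dst, size_t dst_len,
			 const char *buf, size_t buf_len,
			 size_t offs, size_t resp_len)
{
	size_t copylen;

	if (offs > buf_len)
		return -1;			/* offset outside the buffer */

	copylen = resp_len;
	if (copylen > buf_len - offs)
		copylen = buf_len - offs;	/* response was truncated */
	if (copylen > dst_len)
		copylen = dst_len;		/* caller's buffer is smaller */

	memcpy(dst, buf + offs, copylen);
	return (int)copylen;
}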
@@ -747,6 +801,7 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
747 le32_to_cpu(u.get_c->status), ret); 801 le32_to_cpu(u.get_c->status), ret);
748 } 802 }
749 803
804exit_unlock:
750 mutex_unlock(&priv->command_lock); 805 mutex_unlock(&priv->command_lock);
751 806
752 if (u.buf != priv->command_buffer) 807 if (u.buf != priv->command_buffer)
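The hunk above replaces a blind memcpy with explicit validation of the device-reported offset and length. Reduced to a standalone sketch (a hypothetical helper, with min() as in the kernel's kernel.h), the clamping amounts to:

	/* Sketch: bounds-check a device-supplied (offset, length) pair
	 * against the real buffer before copying anything out.
	 */
	static ssize_t copy_response(u8 *dst, size_t dstlen, const u8 *buf,
				     size_t buflen, size_t respoffs,
				     size_t resplen)
	{
		size_t copylen;

		if (respoffs > buflen)
			return -EINVAL;		/* offset past our buffer */

		copylen = min(resplen, buflen - respoffs); /* truncated reply */
		copylen = min(copylen, dstlen);		   /* caller's room */

		memcpy(dst, buf + respoffs, copylen);
		return copylen;
	}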
@@ -1092,8 +1147,6 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
1092 } 1147 }
1093 1148
1094 priv->wpa_version = wpa_version; 1149 priv->wpa_version = wpa_version;
1095 priv->wpa_auth_type = auth_type;
1096 priv->wpa_keymgmt = keymgmt;
1097 1150
1098 return 0; 1151 return 0;
1099} 1152}
@@ -1118,7 +1171,6 @@ static int set_priv_filter(struct usbnet *usbdev)
1118 1171
1119static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise) 1172static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
1120{ 1173{
1121 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1122 __le32 tmp; 1174 __le32 tmp;
1123 int encr_mode, ret; 1175 int encr_mode, ret;
1124 1176
@@ -1147,8 +1199,6 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
1147 return ret; 1199 return ret;
1148 } 1200 }
1149 1201
1150 priv->wpa_cipher_pair = pairwise;
1151 priv->wpa_cipher_group = groupwise;
1152 return 0; 1202 return 0;
1153} 1203}
1154 1204
@@ -1496,7 +1546,7 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
1496static void set_multicast_list(struct usbnet *usbdev) 1546static void set_multicast_list(struct usbnet *usbdev)
1497{ 1547{
1498 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 1548 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1499 struct dev_mc_list *mclist; 1549 struct netdev_hw_addr *ha;
1500 __le32 filter, basefilter; 1550 __le32 filter, basefilter;
1501 int ret; 1551 int ret;
1502 char *mc_addrs = NULL; 1552 char *mc_addrs = NULL;
@@ -1535,9 +1585,9 @@ static void set_multicast_list(struct usbnet *usbdev)
1535 return; 1585 return;
1536 } 1586 }
1537 1587
1538 netdev_for_each_mc_addr(mclist, usbdev->net) 1588 netdev_for_each_mc_addr(ha, usbdev->net)
1539 memcpy(mc_addrs + i++ * ETH_ALEN, 1589 memcpy(mc_addrs + i++ * ETH_ALEN,
1540 mclist->dmi_addr, ETH_ALEN); 1590 ha->addr, ETH_ALEN);
1541 } 1591 }
1542 netif_addr_unlock_bh(usbdev->net); 1592 netif_addr_unlock_bh(usbdev->net);
1543 1593
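The dev_mc_list to netdev_hw_addr switch here mirrors the tree-wide conversion in this merge; the loop shape is unchanged, only the cursor type and field name differ. In isolation (buf and netdev hypothetical, netif_addr_lock held by the caller as above):

	/* Sketch: gathering multicast addresses via the netdev_hw_addr API. */
	struct netdev_hw_addr *ha;
	int i = 0;

	netdev_for_each_mc_addr(ha, netdev)
		memcpy(buf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);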
@@ -1569,6 +1619,194 @@ set_filter:
1569 le32_to_cpu(filter), ret); 1619 le32_to_cpu(filter), ret);
1570} 1620}
1571 1621
1622#ifdef DEBUG
1623static void debug_print_pmkids(struct usbnet *usbdev,
1624 struct ndis_80211_pmkid *pmkids,
1625 const char *func_str)
1626{
1627 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1628 int i, len, count, max_pmkids, entry_len;
1629
1630 max_pmkids = priv->wdev.wiphy->max_num_pmkids;
1631 len = le32_to_cpu(pmkids->length);
1632 count = le32_to_cpu(pmkids->bssid_info_count);
1633
1634 entry_len = (count > 0) ? (len - sizeof(*pmkids)) / count : -1;
1635
1636 netdev_dbg(usbdev->net, "%s(): %d PMKIDs (data len: %d, entry len: "
1637 "%d)\n", func_str, count, len, entry_len);
1638
1639 if (count > max_pmkids)
1640 count = max_pmkids;
1641
1642 for (i = 0; i < count; i++) {
1643 u32 *tmp = (u32 *)pmkids->bssid_info[i].pmkid;
1644
1645 netdev_dbg(usbdev->net, "%s(): bssid: %pM, "
1646 "pmkid: %08X:%08X:%08X:%08X\n",
1647 func_str, pmkids->bssid_info[i].bssid,
1648 cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
1649 cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
1650 }
1651}
1652#else
1653static void debug_print_pmkids(struct usbnet *usbdev,
1654 struct ndis_80211_pmkid *pmkids,
1655 const char *func_str)
1656{
1657 return;
1658}
1659#endif
1660
1661static struct ndis_80211_pmkid *get_device_pmkids(struct usbnet *usbdev)
1662{
1663 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1664 struct ndis_80211_pmkid *pmkids;
1665 int len, ret, max_pmkids;
1666
1667 max_pmkids = priv->wdev.wiphy->max_num_pmkids;
1668 len = sizeof(*pmkids) + max_pmkids * sizeof(pmkids->bssid_info[0]);
1669
1670 pmkids = kzalloc(len, GFP_KERNEL);
1671 if (!pmkids)
1672 return ERR_PTR(-ENOMEM);
1673
1674 pmkids->length = cpu_to_le32(len);
1675 pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
1676
1677 ret = rndis_query_oid(usbdev, OID_802_11_PMKID, pmkids, &len);
1678 if (ret < 0) {
1679 netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d)"
1680 " -> %d\n", __func__, len, max_pmkids, ret);
1681
1682 kfree(pmkids);
1683 return ERR_PTR(ret);
1684 }
1685
1686 if (le32_to_cpu(pmkids->bssid_info_count) > max_pmkids)
1687 pmkids->bssid_info_count = cpu_to_le32(max_pmkids);
1688
1689 debug_print_pmkids(usbdev, pmkids, __func__);
1690
1691 return pmkids;
1692}
1693
1694static int set_device_pmkids(struct usbnet *usbdev,
1695 struct ndis_80211_pmkid *pmkids)
1696{
1697 int ret, len, num_pmkids;
1698
1699 num_pmkids = le32_to_cpu(pmkids->bssid_info_count);
1700 len = sizeof(*pmkids) + num_pmkids * sizeof(pmkids->bssid_info[0]);
1701 pmkids->length = cpu_to_le32(len);
1702
1703 debug_print_pmkids(usbdev, pmkids, __func__);
1704
1705 ret = rndis_set_oid(usbdev, OID_802_11_PMKID, pmkids,
1706 le32_to_cpu(pmkids->length));
1707 if (ret < 0) {
1708 netdev_dbg(usbdev->net, "%s(): OID_802_11_PMKID(%d, %d) -> %d"
1709 "\n", __func__, len, num_pmkids, ret);
1710 }
1711
1712 kfree(pmkids);
1713 return ret;
1714}
1715
1716static struct ndis_80211_pmkid *remove_pmkid(struct usbnet *usbdev,
1717 struct ndis_80211_pmkid *pmkids,
1718 struct cfg80211_pmksa *pmksa,
1719 int max_pmkids)
1720{
1721 int i, len, count, newlen, err;
1722
1723 len = le32_to_cpu(pmkids->length);
1724 count = le32_to_cpu(pmkids->bssid_info_count);
1725
1726 if (count > max_pmkids)
1727 count = max_pmkids;
1728
1729 for (i = 0; i < count; i++)
1730 if (!compare_ether_addr(pmkids->bssid_info[i].bssid,
1731 pmksa->bssid))
1732 break;
1733
1734 /* pmkid not found */
1735 if (i == count) {
1736 netdev_dbg(usbdev->net, "%s(): bssid not found (%pM)\n",
1737 __func__, pmksa->bssid);
1738 err = -ENOENT;
1739 goto error;
1740 }
1741
1742 for (; i + 1 < count; i++)
1743 pmkids->bssid_info[i] = pmkids->bssid_info[i + 1];
1744
1745 count--;
1746 newlen = sizeof(*pmkids) + count * sizeof(pmkids->bssid_info[0]);
1747
1748 pmkids->length = cpu_to_le32(newlen);
1749 pmkids->bssid_info_count = cpu_to_le32(count);
1750
1751 return pmkids;
1752error:
1753 kfree(pmkids);
1754 return ERR_PTR(err);
1755}
1756
1757static struct ndis_80211_pmkid *update_pmkid(struct usbnet *usbdev,
1758 struct ndis_80211_pmkid *pmkids,
1759 struct cfg80211_pmksa *pmksa,
1760 int max_pmkids)
1761{
1762 int i, err, len, count, newlen;
1763
1764 len = le32_to_cpu(pmkids->length);
1765 count = le32_to_cpu(pmkids->bssid_info_count);
1766
1767 if (count > max_pmkids)
1768 count = max_pmkids;
1769
1770 /* update with new pmkid */
1771 for (i = 0; i < count; i++) {
1772 if (compare_ether_addr(pmkids->bssid_info[i].bssid,
1773 pmksa->bssid))
1774 continue;
1775
1776 memcpy(pmkids->bssid_info[i].pmkid, pmksa->pmkid,
1777 WLAN_PMKID_LEN);
1778
1779 return pmkids;
1780 }
1781
1782 /* out of space, return error */
1783 if (i == max_pmkids) {
1784 netdev_dbg(usbdev->net, "%s(): out of space\n", __func__);
1785 err = -ENOSPC;
1786 goto error;
1787 }
1788
1789 /* add new pmkid */
1790 newlen = sizeof(*pmkids) + (count + 1) * sizeof(pmkids->bssid_info[0]);
1791
1792 pmkids = krealloc(pmkids, newlen, GFP_KERNEL);
1793 if (!pmkids) {
1794 err = -ENOMEM;
1795 goto error;
1796 }
1797
1798 pmkids->length = cpu_to_le32(newlen);
1799 pmkids->bssid_info_count = cpu_to_le32(count + 1);
1800
1801 memcpy(pmkids->bssid_info[count].bssid, pmksa->bssid, ETH_ALEN);
1802 memcpy(pmkids->bssid_info[count].pmkid, pmksa->pmkid, WLAN_PMKID_LEN);
1803
1804 return pmkids;
1805error:
1806 kfree(pmkids);
1807 return ERR_PTR(err);
1808}
1809
1572/* 1810/*
1573 * cfg80211 ops 1811 * cfg80211 ops
1574 */ 1812 */
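All of the PMKID helpers above compute lengths from the same variable-length layout: a fixed header followed by bssid_info_count entries. A sketch of the blob they assume (matching the driver's ndis_80211_pmkid definitions; reproduced here for reference, not added by this hunk):

	/* Sketch: the variable-length PMKID list carried by the OID. */
	struct ndis_80211_bssid_info {
		u8 bssid[6];
		u8 pmkid[16];
	};

	struct ndis_80211_pmkid {
		__le32 length;			/* total blob size in bytes */
		__le32 bssid_info_count;	/* entries that follow */
		struct ndis_80211_bssid_info bssid_info[0];
	};

	/* so: length == sizeof(struct ndis_80211_pmkid)
	 *		 + count * sizeof(struct ndis_80211_bssid_info)
	 */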
@@ -2179,6 +2417,78 @@ static int rndis_dump_station(struct wiphy *wiphy, struct net_device *dev,
2179 return 0; 2417 return 0;
2180} 2418}
2181 2419
2420static int rndis_set_pmksa(struct wiphy *wiphy, struct net_device *netdev,
2421 struct cfg80211_pmksa *pmksa)
2422{
2423 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2424 struct usbnet *usbdev = priv->usbdev;
2425 struct ndis_80211_pmkid *pmkids;
2426 u32 *tmp = (u32 *)pmksa->pmkid;
2427
2428 netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
2429 pmksa->bssid,
2430 cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
2431 cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
2432
2433 pmkids = get_device_pmkids(usbdev);
2434 if (IS_ERR(pmkids)) {
2435 /* couldn't read PMKID cache from device */
2436 return PTR_ERR(pmkids);
2437 }
2438
2439 pmkids = update_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
2440 if (IS_ERR(pmkids)) {
2441 /* not found, list full, etc */
2442 return PTR_ERR(pmkids);
2443 }
2444
2445 return set_device_pmkids(usbdev, pmkids);
2446}
2447
2448static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
2449 struct cfg80211_pmksa *pmksa)
2450{
2451 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2452 struct usbnet *usbdev = priv->usbdev;
2453 struct ndis_80211_pmkid *pmkids;
2454 u32 *tmp = (u32 *)pmksa->pmkid;
2455
2456 netdev_dbg(usbdev->net, "%s(%pM, %08X:%08X:%08X:%08X)\n", __func__,
2457 pmksa->bssid,
2458 cpu_to_be32(tmp[0]), cpu_to_be32(tmp[1]),
2459 cpu_to_be32(tmp[2]), cpu_to_be32(tmp[3]));
2460
2461 pmkids = get_device_pmkids(usbdev);
2462 if (IS_ERR(pmkids)) {
2463 /* couldn't read PMKID cache from device */
2464 return PTR_ERR(pmkids);
2465 }
2466
2467 pmkids = remove_pmkid(usbdev, pmkids, pmksa, wiphy->max_num_pmkids);
2468 if (IS_ERR(pmkids)) {
2469 /* not found, etc */
2470 return PTR_ERR(pmkids);
2471 }
2472
2473 return set_device_pmkids(usbdev, pmkids);
2474}
2475
2476static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
2477{
2478 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2479 struct usbnet *usbdev = priv->usbdev;
2480 struct ndis_80211_pmkid pmkid;
2481
2482 netdev_dbg(usbdev->net, "%s()\n", __func__);
2483
2484 memset(&pmkid, 0, sizeof(pmkid));
2485
2486 pmkid.length = cpu_to_le32(sizeof(pmkid));
2487 pmkid.bssid_info_count = cpu_to_le32(0);
2488
2489 return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid));
2490}
2491
2182/* 2492/*
2183 * workers, indication handlers, device poller 2493 * workers, indication handlers, device poller
2184 */ 2494 */
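Taken together, rndis_set_pmksa and rndis_del_pmksa are the same read-modify-write cycle against the device-side cache, differing only in the middle step; rndis_flush_pmksa skips the read entirely. In outline (error handling exactly as above):

	/* Sketch: the shared PMKSA update cycle used by both handlers. */
	pmkids = get_device_pmkids(usbdev);	/* read current cache */
	if (IS_ERR(pmkids))
		return PTR_ERR(pmkids);

	pmkids = update_pmkid(usbdev, pmkids, pmksa, max_pmkids);
						/* or remove_pmkid() */
	if (IS_ERR(pmkids))
		return PTR_ERR(pmkids);		/* helper freed the blob */

	return set_device_pmkids(usbdev, pmkids); /* write back and free */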
@@ -2523,12 +2833,14 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
2523 } 2833 }
2524} 2834}
2525 2835
2526static int rndis_wlan_get_caps(struct usbnet *usbdev) 2836static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
2527{ 2837{
2528 struct { 2838 struct {
2529 __le32 num_items; 2839 __le32 num_items;
2530 __le32 items[8]; 2840 __le32 items[8];
2531 } networks_supported; 2841 } networks_supported;
2842 struct ndis_80211_capability *caps;
2843 u8 caps_buf[sizeof(*caps) + sizeof(caps->auth_encr_pair) * 16];
2532 int len, retval, i, n; 2844 int len, retval, i, n;
2533 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 2845 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2534 2846
@@ -2556,6 +2868,21 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev)
2556 } 2868 }
2557 } 2869 }
2558 2870
2871 /* get device 802.11 capabilities, number of PMKIDs */
2872 caps = (struct ndis_80211_capability *)caps_buf;
2873 len = sizeof(caps_buf);
2874 retval = rndis_query_oid(usbdev, OID_802_11_CAPABILITY, caps, &len);
2875 if (retval >= 0) {
2876 netdev_dbg(usbdev->net, "OID_802_11_CAPABILITY -> len %d, "
2877 "ver %d, pmkids %d, auth-encr-pairs %d\n",
2878 le32_to_cpu(caps->length),
2879 le32_to_cpu(caps->version),
2880 le32_to_cpu(caps->num_pmkids),
2881 le32_to_cpu(caps->num_auth_encr_pair));
2882 wiphy->max_num_pmkids = le32_to_cpu(caps->num_pmkids);
2883 } else
2884 wiphy->max_num_pmkids = 0;
2885
2559 return retval; 2886 return retval;
2560} 2887}
2561 2888
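The caps_buf sizing above implies a fixed header plus up to 16 trailing auth/encr pairs. A sketch of the capability blob's shape as the le32_to_cpu() accesses assume it (matching the driver's ndis_80211_capability; shown for reference only):

	/* Sketch: layout behind the OID_802_11_CAPABILITY query above. */
	struct ndis_80211_capability {
		__le32 length;
		__le32 version;
		__le32 num_pmkids;
		__le32 num_auth_encr_pair;
		struct ndis_80211_auth_encr_pair auth_encr_pair[0];
	};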
@@ -2803,7 +3130,7 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
2803 wiphy->max_scan_ssids = 1; 3130 wiphy->max_scan_ssids = 1;
2804 3131
2805 /* TODO: fill-out band/encr information based on priv->caps */ 3132 /* TODO: fill-out band/encr information based on priv->caps */
2806 rndis_wlan_get_caps(usbdev); 3133 rndis_wlan_get_caps(usbdev, wiphy);
2807 3134
2808 memcpy(priv->channels, rndis_channels, sizeof(rndis_channels)); 3135 memcpy(priv->channels, rndis_channels, sizeof(rndis_channels));
2809 memcpy(priv->rates, rndis_rates, sizeof(rndis_rates)); 3136 memcpy(priv->rates, rndis_rates, sizeof(rndis_rates));
@@ -2863,9 +3190,6 @@ static void rndis_wlan_unbind(struct usbnet *usbdev, struct usb_interface *intf)
2863 flush_workqueue(priv->workqueue); 3190 flush_workqueue(priv->workqueue);
2864 destroy_workqueue(priv->workqueue); 3191 destroy_workqueue(priv->workqueue);
2865 3192
2866 if (priv && priv->wpa_ie_len)
2867 kfree(priv->wpa_ie);
2868
2869 rndis_unbind(usbdev, intf); 3193 rndis_unbind(usbdev, intf);
2870 3194
2871 wiphy_unregister(priv->wdev.wiphy); 3195 wiphy_unregister(priv->wdev.wiphy);
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 5f5204b82891..cdbf59108ef9 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -526,6 +526,10 @@ static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev,
526 526
527 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1); 527 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
528 rt2x00pci_register_write(rt2x00dev, CSR20, reg); 528 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
529 } else {
530 rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
531 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
532 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
529 } 533 }
530 534
531 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 535 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
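Previously only the sleep path touched CSR20, so AUTOWAKE stayed enabled after the device returned to the awake state; the new else branch clears it symmetrically. The same fix is applied to rt2500pci and rt2500usb in the two hunks below. The pattern in isolation:

	/* Sketch: enable autowake on sleep, clear it again on wake. */
	if (state == STATE_SLEEP) {
		/* ... program beacon/TBTT wakeup timing ... */
		rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
		rt2x00pci_register_write(rt2x00dev, CSR20, reg);
	} else {
		rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
		rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
		rt2x00pci_register_write(rt2x00dev, CSR20, reg);
	}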
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 2a73f593aab0..89e986f449da 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -574,6 +574,10 @@ static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev,
574 574
575 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1); 575 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1);
576 rt2x00pci_register_write(rt2x00dev, CSR20, reg); 576 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
577 } else {
578 rt2x00pci_register_read(rt2x00dev, CSR20, &reg);
579 rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0);
580 rt2x00pci_register_write(rt2x00dev, CSR20, reg);
577 } 581 }
578 582
579 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 583 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 8ebb705fe106..7185cb05f257 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -649,6 +649,10 @@ static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev,
649 649
650 rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1); 650 rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1);
651 rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); 651 rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
652 } else {
653 rt2500usb_register_read(rt2x00dev, MAC_CSR18, &reg);
654 rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0);
655 rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg);
652 } 656 }
653 657
654 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 658 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
@@ -1644,11 +1648,6 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1644 unsigned int i; 1648 unsigned int i;
1645 1649
1646 /* 1650 /*
1647 * Disable powersaving as default.
1648 */
1649 rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
1650
1651 /*
1652 * Initialize all hw fields. 1651 * Initialize all hw fields.
1653 */ 1652 */
1654 rt2x00dev->hw->flags = 1653 rt2x00dev->hw->flags =
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 74c0433dba37..ec893721cc80 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -56,15 +56,20 @@
56#define RF3021 0x0007 56#define RF3021 0x0007
57#define RF3022 0x0008 57#define RF3022 0x0008
58#define RF3052 0x0009 58#define RF3052 0x0009
59#define RF3320 0x000b
59 60
60/* 61/*
61 * Chipset version. 62 * Chipset revisions.
62 */ 63 */
63#define RT2860C_VERSION 0x0100 64#define REV_RT2860C 0x0100
64#define RT2860D_VERSION 0x0101 65#define REV_RT2860D 0x0101
65#define RT2880E_VERSION 0x0200 66#define REV_RT2870D 0x0101
66#define RT2883_VERSION 0x0300 67#define REV_RT2872E 0x0200
67#define RT3070_VERSION 0x0200 68#define REV_RT3070E 0x0200
69#define REV_RT3070F 0x0201
70#define REV_RT3071E 0x0211
71#define REV_RT3090E 0x0211
72#define REV_RT3390E 0x0211
68 73
69/* 74/*
70 * Signal information. 75 * Signal information.
@@ -90,10 +95,16 @@
90#define NUM_TX_QUEUES 4 95#define NUM_TX_QUEUES 4
91 96
92/* 97/*
93 * USB registers. 98 * Registers.
94 */ 99 */
95 100
96/* 101/*
102 * OPT_14: Unknown register used by rt3xxx devices.
103 */
104#define OPT_14_CSR 0x0114
105#define OPT_14_CSR_BIT0 FIELD32(0x00000001)
106
107/*
97 * INT_SOURCE_CSR: Interrupt source register. 108 * INT_SOURCE_CSR: Interrupt source register.
98 * Write one to clear corresponding bit. 109 * Write one to clear corresponding bit.
99 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c 110 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
@@ -398,6 +409,31 @@
398#define EFUSE_DATA3 0x059c 409#define EFUSE_DATA3 0x059c
399 410
400/* 411/*
412 * LDO_CFG0
413 */
414#define LDO_CFG0 0x05d4
415#define LDO_CFG0_DELAY3 FIELD32(0x000000ff)
416#define LDO_CFG0_DELAY2 FIELD32(0x0000ff00)
417#define LDO_CFG0_DELAY1 FIELD32(0x00ff0000)
418#define LDO_CFG0_BGSEL FIELD32(0x03000000)
419#define LDO_CFG0_LDO_CORE_VLEVEL FIELD32(0x1c000000)
420#define LD0_CFG0_LDO25_LEVEL FIELD32(0x60000000)
421#define LDO_CFG0_LDO25_LARGEA FIELD32(0x80000000)
422
423/*
424 * GPIO_SWITCH
425 */
426#define GPIO_SWITCH 0x05dc
427#define GPIO_SWITCH_0 FIELD32(0x00000001)
428#define GPIO_SWITCH_1 FIELD32(0x00000002)
429#define GPIO_SWITCH_2 FIELD32(0x00000004)
430#define GPIO_SWITCH_3 FIELD32(0x00000008)
431#define GPIO_SWITCH_4 FIELD32(0x00000010)
432#define GPIO_SWITCH_5 FIELD32(0x00000020)
433#define GPIO_SWITCH_6 FIELD32(0x00000040)
434#define GPIO_SWITCH_7 FIELD32(0x00000080)
435
436/*
401 * MAC Control/Status Registers(CSR). 437 * MAC Control/Status Registers(CSR).
402 * Some values are set in TU, whereas 1 TU == 1024 us. 438 * Some values are set in TU, whereas 1 TU == 1024 us.
403 */ 439 */
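The FIELD32() descriptors pair a bit mask with a register offset; rt2x00's accessors shift values in and out of those masks. A minimal usage sketch with the new LDO_CFG0 definitions (accessor names per rt2x00reg.h; this exact sequence appears in the rt2800lib.c hunk further down):

	/* Sketch: programming masked fields via FIELD32 descriptors. */
	u32 reg;

	rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
	rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);		/* bits 25:24 */
	rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);	/* bits 28:26 */
	rt2800_register_write(rt2x00dev, LDO_CFG0, reg);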
@@ -1492,14 +1528,32 @@ struct mac_iveiv_entry {
1492#define BBP4_BANDWIDTH FIELD8(0x18) 1528#define BBP4_BANDWIDTH FIELD8(0x18)
1493 1529
1494/* 1530/*
1531 * BBP 138: Unknown
1532 */
1533#define BBP138_RX_ADC1 FIELD8(0x02)
1534#define BBP138_RX_ADC2 FIELD8(0x04)
1535#define BBP138_TX_DAC1 FIELD8(0x20)
1536#define BBP138_TX_DAC2 FIELD8(0x40)
1537
1538/*
1495 * RFCSR registers 1539 * RFCSR registers
1496 * The wordsize of the RFCSR is 8 bits. 1540 * The wordsize of the RFCSR is 8 bits.
1497 */ 1541 */
1498 1542
1499/* 1543/*
1544 * RFCSR 1:
1545 */
1546#define RFCSR1_RF_BLOCK_EN FIELD8(0x01)
1547#define RFCSR1_RX0_PD FIELD8(0x04)
1548#define RFCSR1_TX0_PD FIELD8(0x08)
1549#define RFCSR1_RX1_PD FIELD8(0x10)
1550#define RFCSR1_TX1_PD FIELD8(0x20)
1551
1552/*
1500 * RFCSR 6: 1553 * RFCSR 6:
1501 */ 1554 */
1502#define RFCSR6_R FIELD8(0x03) 1555#define RFCSR6_R1 FIELD8(0x03)
1556#define RFCSR6_R2 FIELD8(0x40)
1503 1557
1504/* 1558/*
1505 * RFCSR 7: 1559 * RFCSR 7:
@@ -1512,6 +1566,28 @@ struct mac_iveiv_entry {
1512#define RFCSR12_TX_POWER FIELD8(0x1f) 1566#define RFCSR12_TX_POWER FIELD8(0x1f)
1513 1567
1514/* 1568/*
1569 * RFCSR 15:
1570 */
1571#define RFCSR15_TX_LO2_EN FIELD8(0x08)
1572
1573/*
1574 * RFCSR 17:
1575 */
1576#define RFCSR17_TXMIXER_GAIN FIELD8(0x07)
1577#define RFCSR17_TX_LO1_EN FIELD8(0x08)
1578#define RFCSR17_R FIELD8(0x20)
1579
1580/*
1581 * RFCSR 20:
1582 */
1583#define RFCSR20_RX_LO1_EN FIELD8(0x08)
1584
1585/*
1586 * RFCSR 21:
1587 */
1588#define RFCSR21_RX_LO2_EN FIELD8(0x08)
1589
1590/*
1515 * RFCSR 22: 1591 * RFCSR 22:
1516 */ 1592 */
1517#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01) 1593#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)
@@ -1522,6 +1598,14 @@ struct mac_iveiv_entry {
1522#define RFCSR23_FREQ_OFFSET FIELD8(0x7f) 1598#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
1523 1599
1524/* 1600/*
1601 * RFCSR 27:
1602 */
1603#define RFCSR27_R1 FIELD8(0x03)
1604#define RFCSR27_R2 FIELD8(0x04)
1605#define RFCSR27_R3 FIELD8(0x30)
1606#define RFCSR27_R4 FIELD8(0x40)
1607
1608/*
1525 * RFCSR 30: 1609 * RFCSR 30:
1526 */ 1610 */
1527#define RFCSR30_RF_CALIBRATION FIELD8(0x80) 1611#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
@@ -1603,6 +1687,8 @@ struct mac_iveiv_entry {
1603#define EEPROM_NIC_WPS_PBC FIELD16(0x0080) 1687#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
1604#define EEPROM_NIC_BW40M_BG FIELD16(0x0100) 1688#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
1605#define EEPROM_NIC_BW40M_A FIELD16(0x0200) 1689#define EEPROM_NIC_BW40M_A FIELD16(0x0200)
1690#define EEPROM_NIC_ANT_DIVERSITY FIELD16(0x0800)
1691#define EEPROM_NIC_DAC_TEST FIELD16(0x8000)
1606 1692
1607/* 1693/*
1608 * EEPROM frequency 1694 * EEPROM frequency
@@ -1659,6 +1745,12 @@ struct mac_iveiv_entry {
1659#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00) 1745#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
1660 1746
1661/* 1747/*
1748 * EEPROM TXMIXER GAIN BG offset (note overlaps with EEPROM RSSI BG2).
1749 */
1750#define EEPROM_TXMIXER_GAIN_BG 0x0024
1751#define EEPROM_TXMIXER_GAIN_BG_VAL FIELD16(0x0007)
1752
1753/*
1662 * EEPROM RSSI A offset 1754 * EEPROM RSSI A offset
1663 */ 1755 */
1664#define EEPROM_RSSI_A 0x0025 1756#define EEPROM_RSSI_A 0x0025
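EEPROM words are read as 16-bit quantities and fields extracted with the FIELD16 masks; since the new TXMIXER gain word overlaps RSSI_BG2, only its low three bits are meaningful. A sketch of how the value would be fetched (accessors as used elsewhere in this patch):

	/* Sketch: pulling the 3-bit TX mixer gain out of its EEPROM word. */
	u16 eeprom;
	u8 txmixer_gain;

	rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
	txmixer_gain = rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL);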
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index c015ce9fdd09..2648f315a934 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -360,11 +360,6 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
360 rt2800_register_read(led->rt2x00dev, LED_CFG, &reg); 360 rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
361 rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on); 361 rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
362 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off); 362 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
363 rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
364 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
365 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
366 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
367 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
368 rt2800_register_write(led->rt2x00dev, LED_CFG, reg); 363 rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
369 364
370 return 0; 365 return 0;
@@ -610,10 +605,6 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
610{ 605{
611 u32 reg; 606 u32 reg;
612 607
613 rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
614 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
615 rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
616
617 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg); 608 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
618 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 609 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
619 !!erp->short_preamble); 610 !!erp->short_preamble);
@@ -632,15 +623,12 @@ void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
632 623
633 rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg); 624 rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
634 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time); 625 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
635 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
636 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg); 626 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
637 627
638 rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg); 628 rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
639 rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs); 629 rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
640 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs); 630 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
641 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
642 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs); 631 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
643 rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
644 rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg); 632 rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
645 633
646 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 634 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
@@ -718,10 +706,10 @@ static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
718 rt2x00dev->lna_gain = lna_gain; 706 rt2x00dev->lna_gain = lna_gain;
719} 707}
720 708
721static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev, 709static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev,
722 struct ieee80211_conf *conf, 710 struct ieee80211_conf *conf,
723 struct rf_channel *rf, 711 struct rf_channel *rf,
724 struct channel_info *info) 712 struct channel_info *info)
725{ 713{
726 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); 714 rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
727 715
@@ -787,10 +775,10 @@ static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
787 rt2800_rf_write(rt2x00dev, 4, rf->rf4); 775 rt2800_rf_write(rt2x00dev, 4, rf->rf4);
788} 776}
789 777
790static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev, 778static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev,
791 struct ieee80211_conf *conf, 779 struct ieee80211_conf *conf,
792 struct rf_channel *rf, 780 struct rf_channel *rf,
793 struct channel_info *info) 781 struct channel_info *info)
794{ 782{
795 u8 rfcsr; 783 u8 rfcsr;
796 784
@@ -798,7 +786,7 @@ static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
798 rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3); 786 rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3);
799 787
800 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr); 788 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
801 rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2); 789 rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2);
802 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); 790 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
803 791
804 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr); 792 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
@@ -827,15 +815,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
827 unsigned int tx_pin; 815 unsigned int tx_pin;
828 u8 bbp; 816 u8 bbp;
829 817
830 if ((rt2x00_rt(rt2x00dev, RT3070) || 818 if (rt2x00_rf(rt2x00dev, RF2020) ||
831 rt2x00_rt(rt2x00dev, RT3090)) && 819 rt2x00_rf(rt2x00dev, RF3020) ||
832 (rt2x00_rf(rt2x00dev, RF2020) || 820 rt2x00_rf(rt2x00dev, RF3021) ||
833 rt2x00_rf(rt2x00dev, RF3020) || 821 rt2x00_rf(rt2x00dev, RF3022))
834 rt2x00_rf(rt2x00dev, RF3021) || 822 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
835 rt2x00_rf(rt2x00dev, RF3022)))
836 rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
837 else 823 else
838 rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info); 824 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
839 825
840 /* 826 /*
841 * Change BBP settings 827 * Change BBP settings
@@ -899,8 +885,7 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
899 rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf)); 885 rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
900 rt2800_bbp_write(rt2x00dev, 3, bbp); 886 rt2800_bbp_write(rt2x00dev, 3, bbp);
901 887
902 if (rt2x00_rt(rt2x00dev, RT2860) && 888 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
903 (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
904 if (conf_is_ht40(conf)) { 889 if (conf_is_ht40(conf)) {
905 rt2800_bbp_write(rt2x00dev, 69, 0x1a); 890 rt2800_bbp_write(rt2x00dev, 69, 0x1a);
906 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 891 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -988,10 +973,6 @@ static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
988 libconf->conf->short_frame_max_tx_count); 973 libconf->conf->short_frame_max_tx_count);
989 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 974 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
990 libconf->conf->long_frame_max_tx_count); 975 libconf->conf->long_frame_max_tx_count);
991 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
992 rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
993 rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
994 rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
995 rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg); 976 rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
996} 977}
997 978
@@ -1015,13 +996,13 @@ static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev,
1015 996
1016 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); 997 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
1017 } else { 998 } else {
1018 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
1019
1020 rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg); 999 rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
1021 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0); 1000 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
1022 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0); 1001 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
1023 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0); 1002 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
1024 rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg); 1003 rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
1004
1005 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
1025 } 1006 }
1026} 1007}
1027 1008
@@ -1062,9 +1043,10 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
1062static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev) 1043static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1063{ 1044{
1064 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) { 1045 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
1065 if (rt2x00_is_usb(rt2x00dev) && 1046 if (rt2x00_rt(rt2x00dev, RT3070) ||
1066 rt2x00_rt(rt2x00dev, RT3070) && 1047 rt2x00_rt(rt2x00dev, RT3071) ||
1067 (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) 1048 rt2x00_rt(rt2x00dev, RT3090) ||
1049 rt2x00_rt(rt2x00dev, RT3390))
1068 return 0x1c + (2 * rt2x00dev->lna_gain); 1050 return 0x1c + (2 * rt2x00dev->lna_gain);
1069 else 1051 else
1070 return 0x2e + rt2x00dev->lna_gain; 1052 return 0x2e + rt2x00dev->lna_gain;
@@ -1095,8 +1077,7 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
1095void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, 1077void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
1096 const u32 count) 1078 const u32 count)
1097{ 1079{
1098 if (rt2x00_rt(rt2x00dev, RT2860) && 1080 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
1099 (rt2x00_rev(rt2x00dev) == RT2860C_VERSION))
1100 return; 1081 return;
1101 1082
1102 /* 1083 /*
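The rt2x00_rt_rev*() comparisons used throughout this file collapse the old two-clause chip-plus-revision checks into one call. A sketch of what these helpers amount to (the real definitions live in rt2x00.h; shown here as the call sites imply them):

	/* Sketch: chipset + revision checks replacing open-coded tests. */
	static inline bool rt2x00_rt_rev(struct rt2x00_dev *rt2x00dev,
					 const u16 rt, const u16 rev)
	{
		return rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) == rev;
	}

	static inline bool rt2x00_rt_rev_lt(struct rt2x00_dev *rt2x00dev,
					    const u16 rt, const u16 rev)
	{
		return rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) < rev;
	}

	static inline bool rt2x00_rt_rev_gte(struct rt2x00_dev *rt2x00dev,
					     const u16 rt, const u16 rev)
	{
		return rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) >= rev;
	}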
@@ -1114,8 +1095,17 @@ EXPORT_SYMBOL_GPL(rt2800_link_tuner);
1114int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) 1095int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1115{ 1096{
1116 u32 reg; 1097 u32 reg;
1098 u16 eeprom;
1117 unsigned int i; 1099 unsigned int i;
1118 1100
1101 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1102 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
1103 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
1104 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
1105 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
1106 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
1107 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
1108
1119 if (rt2x00_is_usb(rt2x00dev)) { 1109 if (rt2x00_is_usb(rt2x00dev)) {
1120 /* 1110 /*
1121 * Wait until BBP and RF are ready. 1111 * Wait until BBP and RF are ready.
@@ -1135,8 +1125,25 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1135 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg); 1125 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
1136 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 1126 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
1137 reg & ~0x00002000); 1127 reg & ~0x00002000);
1138 } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) 1128 } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
1129 /*
1130 * Reset DMA indexes
1131 */
1132 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
1133 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
1134 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
1135 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
1136 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
1137 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
1138 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
1139 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
1140 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
1141
1142 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
1143 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
1144
1139 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 1145 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
1146 }
1140 1147
1141 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 1148 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1142 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); 1149 rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
@@ -1181,12 +1188,42 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1181 rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0); 1188 rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
1182 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); 1189 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
1183 1190
1184 if (rt2x00_is_usb(rt2x00dev) && 1191 rt2800_config_filter(rt2x00dev, FIF_ALLMULTI);
1185 rt2x00_rt(rt2x00dev, RT3070) && 1192
1186 (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) { 1193 rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
1194 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, 9);
1195 rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
1196 rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);
1197
1198 if (rt2x00_rt(rt2x00dev, RT3071) ||
1199 rt2x00_rt(rt2x00dev, RT3090) ||
1200 rt2x00_rt(rt2x00dev, RT3390)) {
1187 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); 1201 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1188 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); 1202 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1189 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); 1203 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
1204 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
1205 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
1206 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
1207 if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
1208 rt2800_register_write(rt2x00dev, TX_SW_CFG2,
1209 0x0000002c);
1210 else
1211 rt2800_register_write(rt2x00dev, TX_SW_CFG2,
1212 0x0000000f);
1213 } else {
1214 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1215 }
1217 } else if (rt2x00_rt(rt2x00dev, RT3070)) {
1218 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
1219
1220 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
1221 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
1222 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000002c);
1223 } else {
1224 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
1225 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
1226 }
1190 } else { 1227 } else {
1191 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); 1228 rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
1192 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); 1229 rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
@@ -1205,19 +1242,15 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1205 1242
1206 rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg); 1243 rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
1207 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9); 1244 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
1245 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 32);
1208 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10); 1246 rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
1209 rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg); 1247 rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);
1210 1248
1211 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg); 1249 rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
1212 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE); 1250 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
1213 if ((rt2x00_rt(rt2x00dev, RT2872) && 1251 if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) ||
1214 (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION)) ||
1215 rt2x00_rt(rt2x00dev, RT2880) ||
1216 rt2x00_rt(rt2x00dev, RT2883) || 1252 rt2x00_rt(rt2x00dev, RT2883) ||
1217 rt2x00_rt(rt2x00dev, RT2890) || 1253 rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E))
1218 rt2x00_rt(rt2x00dev, RT3052) ||
1219 (rt2x00_rt(rt2x00dev, RT3070) &&
1220 (rt2x00_rev(rt2x00dev) < RT3070_VERSION)))
1221 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2); 1254 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
1222 else 1255 else
1223 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1); 1256 rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1225,38 +1258,61 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1225 rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0); 1258 rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
1226 rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); 1259 rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
1227 1260
1261 rt2800_register_read(rt2x00dev, LED_CFG, &reg);
1262 rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, 70);
1263 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, 30);
1264 rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
1265 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
1266 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3);
1267 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
1268 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
1269 rt2800_register_write(rt2x00dev, LED_CFG, reg);
1270
1228 rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f); 1271 rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);
1229 1272
1273 rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
1274 rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 15);
1275 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 31);
1276 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
1277 rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
1278 rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
1279 rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
1280 rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
1281
1230 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg); 1282 rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
1231 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1); 1283 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
1284 rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 1);
1232 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0); 1285 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
1233 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0); 1286 rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
1287 rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE, 1);
1234 rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0); 1288 rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
1235 rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0); 1289 rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
1236 rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg); 1290 rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);
1237 1291
1238 rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg); 1292 rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
1239 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8); 1293 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3);
1240 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0); 1294 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
1241 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1); 1295 rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
1242 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1); 1296 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1243 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 1297 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1244 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1); 1298 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1245 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1); 1299 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1246 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1300 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1247 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1); 1301 rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1302 rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, 1);
1248 rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg); 1303 rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);
1249 1304
1250 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg); 1305 rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
1251 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8); 1306 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3);
1252 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0); 1307 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
1253 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1); 1308 rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
1254 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1); 1309 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1255 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 1310 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
1256 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1); 1311 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
1257 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1); 1312 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1258 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1313 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1259 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1); 1314 rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1315 rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, 1);
1260 rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg); 1316 rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
1261 1317
1262 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg); 1318 rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
@@ -1269,11 +1325,13 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1269 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0); 1325 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1270 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1326 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1271 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0); 1327 rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1328 rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, 0);
1272 rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg); 1329 rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);
1273 1330
1274 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg); 1331 rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
1275 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084); 1332 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
1276 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0); 1333 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL,
1334 !rt2x00_is_usb(rt2x00dev));
1277 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1); 1335 rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
1278 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1); 1336 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
1279 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); 1337 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
@@ -1281,6 +1339,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1281 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1); 1339 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1282 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1340 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1283 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1); 1341 rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1342 rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, 0);
1284 rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg); 1343 rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);
1285 1344
1286 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg); 1345 rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
@@ -1293,6 +1352,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1293 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0); 1352 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
1294 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1353 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1295 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0); 1354 rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
1355 rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, 0);
1296 rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg); 1356 rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);
1297 1357
1298 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg); 1358 rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
@@ -1305,6 +1365,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1305 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1); 1365 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
1306 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1); 1366 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
1307 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1); 1367 rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
1368 rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, 0);
1308 rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg); 1369 rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
1309 1370
1310 if (rt2x00_is_usb(rt2x00dev)) { 1371 if (rt2x00_is_usb(rt2x00dev)) {
@@ -1334,6 +1395,15 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1334 rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg); 1395 rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);
1335 1396
1336 rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca); 1397 rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
1398
1399 rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
1400 rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, 32);
1401 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, 32);
1402 rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
1403 rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, 314);
1404 rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
1405 rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);
1406
1337 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); 1407 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
1338 1408
1339 /* 1409 /*
@@ -1483,38 +1553,67 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
1483 1553
1484 rt2800_bbp_write(rt2x00dev, 65, 0x2c); 1554 rt2800_bbp_write(rt2x00dev, 65, 0x2c);
1485 rt2800_bbp_write(rt2x00dev, 66, 0x38); 1555 rt2800_bbp_write(rt2x00dev, 66, 0x38);
1486 rt2800_bbp_write(rt2x00dev, 69, 0x12); 1556
1557 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) {
1558 rt2800_bbp_write(rt2x00dev, 69, 0x16);
1559 rt2800_bbp_write(rt2x00dev, 73, 0x12);
1560 } else {
1561 rt2800_bbp_write(rt2x00dev, 69, 0x12);
1562 rt2800_bbp_write(rt2x00dev, 73, 0x10);
1563 }
1564
1487 rt2800_bbp_write(rt2x00dev, 70, 0x0a); 1565 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
1488 rt2800_bbp_write(rt2x00dev, 73, 0x10); 1566
1489 rt2800_bbp_write(rt2x00dev, 81, 0x37); 1567 if (rt2x00_rt(rt2x00dev, RT3070) ||
1568 rt2x00_rt(rt2x00dev, RT3071) ||
1569 rt2x00_rt(rt2x00dev, RT3090) ||
1570 rt2x00_rt(rt2x00dev, RT3390)) {
1571 rt2800_bbp_write(rt2x00dev, 79, 0x13);
1572 rt2800_bbp_write(rt2x00dev, 80, 0x05);
1573 rt2800_bbp_write(rt2x00dev, 81, 0x33);
1574 } else {
1575 rt2800_bbp_write(rt2x00dev, 81, 0x37);
1576 }
1577
1490 rt2800_bbp_write(rt2x00dev, 82, 0x62); 1578 rt2800_bbp_write(rt2x00dev, 82, 0x62);
1491 rt2800_bbp_write(rt2x00dev, 83, 0x6a); 1579 rt2800_bbp_write(rt2x00dev, 83, 0x6a);
1492 rt2800_bbp_write(rt2x00dev, 84, 0x99); 1580
1581 if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D) ||
1582 rt2x00_rt_rev(rt2x00dev, RT2870, REV_RT2870D))
1583 rt2800_bbp_write(rt2x00dev, 84, 0x19);
1584 else
1585 rt2800_bbp_write(rt2x00dev, 84, 0x99);
1586
1493 rt2800_bbp_write(rt2x00dev, 86, 0x00); 1587 rt2800_bbp_write(rt2x00dev, 86, 0x00);
1494 rt2800_bbp_write(rt2x00dev, 91, 0x04); 1588 rt2800_bbp_write(rt2x00dev, 91, 0x04);
1495 rt2800_bbp_write(rt2x00dev, 92, 0x00); 1589 rt2800_bbp_write(rt2x00dev, 92, 0x00);
1496 rt2800_bbp_write(rt2x00dev, 103, 0x00); 1590
1591 if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) ||
1592 rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) ||
1593 rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E) ||
1594 rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E))
1595 rt2800_bbp_write(rt2x00dev, 103, 0xc0);
1596 else
1597 rt2800_bbp_write(rt2x00dev, 103, 0x00);
1598
1497 rt2800_bbp_write(rt2x00dev, 105, 0x05); 1599 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1600 rt2800_bbp_write(rt2x00dev, 106, 0x35);
1498 1601
1499 if (rt2x00_rt(rt2x00dev, RT2860) && 1602 if (rt2x00_rt(rt2x00dev, RT3071) ||
1500 (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) { 1603 rt2x00_rt(rt2x00dev, RT3090) ||
1501 rt2800_bbp_write(rt2x00dev, 69, 0x16); 1604 rt2x00_rt(rt2x00dev, RT3390)) {
1502 rt2800_bbp_write(rt2x00dev, 73, 0x12); 1605 rt2800_bbp_read(rt2x00dev, 138, &value);
1503 }
1504 1606
1505 if (rt2x00_rt(rt2x00dev, RT2860) && 1607 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
1506 (rt2x00_rev(rt2x00dev) > RT2860D_VERSION)) 1608 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
1507 rt2800_bbp_write(rt2x00dev, 84, 0x19); 1609 value |= 0x20;
1610 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
1611 value &= ~0x02;
1508 1612
1509 if (rt2x00_is_usb(rt2x00dev) && 1613 rt2800_bbp_write(rt2x00dev, 138, value);
1510 rt2x00_rt(rt2x00dev, RT3070) &&
1511 (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
1512 rt2800_bbp_write(rt2x00dev, 70, 0x0a);
1513 rt2800_bbp_write(rt2x00dev, 84, 0x99);
1514 rt2800_bbp_write(rt2x00dev, 105, 0x05);
1515 } 1614 }
1516 1615
1517 if (rt2x00_rt(rt2x00dev, RT3052)) { 1616 if (rt2x00_rt(rt2x00dev, RT2872)) {
1518 rt2800_bbp_write(rt2x00dev, 31, 0x08); 1617 rt2800_bbp_write(rt2x00dev, 31, 0x08);
1519 rt2800_bbp_write(rt2x00dev, 78, 0x0e); 1618 rt2800_bbp_write(rt2x00dev, 78, 0x0e);
1520 rt2800_bbp_write(rt2x00dev, 80, 0x08); 1619 rt2800_bbp_write(rt2x00dev, 80, 0x08);
@@ -1598,19 +1697,15 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1598{ 1697{
1599 u8 rfcsr; 1698 u8 rfcsr;
1600 u8 bbp; 1699 u8 bbp;
1700 u32 reg;
1701 u16 eeprom;
1601 1702
1602 if (rt2x00_is_usb(rt2x00dev) && 1703 if (!rt2x00_rt(rt2x00dev, RT3070) &&
1603 rt2x00_rt(rt2x00dev, RT3070) && 1704 !rt2x00_rt(rt2x00dev, RT3071) &&
1604 (rt2x00_rev(rt2x00dev) != RT3070_VERSION)) 1705 !rt2x00_rt(rt2x00dev, RT3090) &&
1706 !rt2x00_rt(rt2x00dev, RT3390))
1605 return 0; 1707 return 0;
1606 1708
1607 if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
1608 if (!rt2x00_rf(rt2x00dev, RF3020) &&
1609 !rt2x00_rf(rt2x00dev, RF3021) &&
1610 !rt2x00_rf(rt2x00dev, RF3022))
1611 return 0;
1612 }
1613
1614 /* 1709 /*
1615 * Init RF calibration. 1710 * Init RF calibration.
1616 */ 1711 */
@@ -1621,13 +1716,15 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1621 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); 1716 rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
1622 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); 1717 rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
1623 1718
1624 if (rt2x00_is_usb(rt2x00dev)) { 1719 if (rt2x00_rt(rt2x00dev, RT3070) ||
1720 rt2x00_rt(rt2x00dev, RT3071) ||
1721 rt2x00_rt(rt2x00dev, RT3090)) {
1625 rt2800_rfcsr_write(rt2x00dev, 4, 0x40); 1722 rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
1626 rt2800_rfcsr_write(rt2x00dev, 5, 0x03); 1723 rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
1627 rt2800_rfcsr_write(rt2x00dev, 6, 0x02); 1724 rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
1628 rt2800_rfcsr_write(rt2x00dev, 7, 0x70); 1725 rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
1629 rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); 1726 rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
1630 rt2800_rfcsr_write(rt2x00dev, 10, 0x71); 1727 rt2800_rfcsr_write(rt2x00dev, 10, 0x41);
1631 rt2800_rfcsr_write(rt2x00dev, 11, 0x21); 1728 rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
1632 rt2800_rfcsr_write(rt2x00dev, 12, 0x7b); 1729 rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
1633 rt2800_rfcsr_write(rt2x00dev, 14, 0x90); 1730 rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
@@ -1640,48 +1737,88 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1640 rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); 1737 rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
1641 rt2800_rfcsr_write(rt2x00dev, 24, 0x16); 1738 rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
1642 rt2800_rfcsr_write(rt2x00dev, 25, 0x01); 1739 rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
1643 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
1644 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); 1740 rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
1645 } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) { 1741 } else if (rt2x00_rt(rt2x00dev, RT3390)) {
1646 rt2800_rfcsr_write(rt2x00dev, 0, 0x50); 1742 rt2800_rfcsr_write(rt2x00dev, 0, 0xa0);
1647 rt2800_rfcsr_write(rt2x00dev, 1, 0x01); 1743 rt2800_rfcsr_write(rt2x00dev, 1, 0xe1);
1648 rt2800_rfcsr_write(rt2x00dev, 2, 0xf7); 1744 rt2800_rfcsr_write(rt2x00dev, 2, 0xf1);
1649 rt2800_rfcsr_write(rt2x00dev, 3, 0x75); 1745 rt2800_rfcsr_write(rt2x00dev, 3, 0x62);
1650 rt2800_rfcsr_write(rt2x00dev, 4, 0x40); 1746 rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
1651 rt2800_rfcsr_write(rt2x00dev, 5, 0x03); 1747 rt2800_rfcsr_write(rt2x00dev, 5, 0x8b);
1652 rt2800_rfcsr_write(rt2x00dev, 6, 0x02); 1748 rt2800_rfcsr_write(rt2x00dev, 6, 0x42);
1653 rt2800_rfcsr_write(rt2x00dev, 7, 0x50); 1749 rt2800_rfcsr_write(rt2x00dev, 7, 0x34);
1654 rt2800_rfcsr_write(rt2x00dev, 8, 0x39); 1750 rt2800_rfcsr_write(rt2x00dev, 8, 0x00);
1655 rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); 1751 rt2800_rfcsr_write(rt2x00dev, 9, 0xc0);
1656 rt2800_rfcsr_write(rt2x00dev, 10, 0x60); 1752 rt2800_rfcsr_write(rt2x00dev, 10, 0x61);
1657 rt2800_rfcsr_write(rt2x00dev, 11, 0x21); 1753 rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
1658 rt2800_rfcsr_write(rt2x00dev, 12, 0x75); 1754 rt2800_rfcsr_write(rt2x00dev, 12, 0x3b);
1659 rt2800_rfcsr_write(rt2x00dev, 13, 0x75); 1755 rt2800_rfcsr_write(rt2x00dev, 13, 0xe0);
1660 rt2800_rfcsr_write(rt2x00dev, 14, 0x90); 1756 rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
1661 rt2800_rfcsr_write(rt2x00dev, 15, 0x58); 1757 rt2800_rfcsr_write(rt2x00dev, 15, 0x53);
1662 rt2800_rfcsr_write(rt2x00dev, 16, 0xb3); 1758 rt2800_rfcsr_write(rt2x00dev, 16, 0xe0);
1663 rt2800_rfcsr_write(rt2x00dev, 17, 0x92); 1759 rt2800_rfcsr_write(rt2x00dev, 17, 0x94);
1664 rt2800_rfcsr_write(rt2x00dev, 18, 0x2c); 1760 rt2800_rfcsr_write(rt2x00dev, 18, 0x5c);
1665 rt2800_rfcsr_write(rt2x00dev, 19, 0x02); 1761 rt2800_rfcsr_write(rt2x00dev, 19, 0x4a);
1666 rt2800_rfcsr_write(rt2x00dev, 20, 0xba); 1762 rt2800_rfcsr_write(rt2x00dev, 20, 0xb2);
1667 rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); 1763 rt2800_rfcsr_write(rt2x00dev, 21, 0xf6);
1668 rt2800_rfcsr_write(rt2x00dev, 22, 0x00); 1764 rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
1669 rt2800_rfcsr_write(rt2x00dev, 23, 0x31); 1765 rt2800_rfcsr_write(rt2x00dev, 23, 0x14);
1670 rt2800_rfcsr_write(rt2x00dev, 24, 0x08); 1766 rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
1671 rt2800_rfcsr_write(rt2x00dev, 25, 0x01); 1767 rt2800_rfcsr_write(rt2x00dev, 25, 0x3d);
1672 rt2800_rfcsr_write(rt2x00dev, 26, 0x25); 1768 rt2800_rfcsr_write(rt2x00dev, 26, 0x85);
1673 rt2800_rfcsr_write(rt2x00dev, 27, 0x23); 1769 rt2800_rfcsr_write(rt2x00dev, 27, 0x00);
1674 rt2800_rfcsr_write(rt2x00dev, 28, 0x13); 1770 rt2800_rfcsr_write(rt2x00dev, 28, 0x41);
1675 rt2800_rfcsr_write(rt2x00dev, 29, 0x83); 1771 rt2800_rfcsr_write(rt2x00dev, 29, 0x8f);
1772 rt2800_rfcsr_write(rt2x00dev, 30, 0x20);
1773 rt2800_rfcsr_write(rt2x00dev, 31, 0x0f);
1774 }
1775
1776 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) {
1777 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
1778 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
1779 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
1780 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
1781 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
1782 rt2x00_rt(rt2x00dev, RT3090)) {
1783 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
1784 rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1);
1785 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
1786
1787 rt2800_rfcsr_write(rt2x00dev, 31, 0x14);
1788
1789 rt2800_register_read(rt2x00dev, LDO_CFG0, &reg);
1790 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
1791 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
1792 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
1793 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
1794 if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST))
1795 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
1796 else
1797 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
1798 }
1799 rt2800_register_write(rt2x00dev, LDO_CFG0, reg);
1800 } else if (rt2x00_rt(rt2x00dev, RT3390)) {
1801 rt2800_register_read(rt2x00dev, GPIO_SWITCH, &reg);
1802 rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0);
1803 rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg);
1676 } 1804 }
1677 1805
1678 /* 1806 /*
1679 * Set RX Filter calibration for 20MHz and 40MHz 1807 * Set RX Filter calibration for 20MHz and 40MHz
1680 */ 1808 */
1681 rt2x00dev->calibration[0] = 1809 if (rt2x00_rt(rt2x00dev, RT3070)) {
1682 rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16); 1810 rt2x00dev->calibration[0] =
1683 rt2x00dev->calibration[1] = 1811 rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
1684 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19); 1812 rt2x00dev->calibration[1] =
1813 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);
1814 } else if (rt2x00_rt(rt2x00dev, RT3071) ||
1815 rt2x00_rt(rt2x00dev, RT3090) ||
1816 rt2x00_rt(rt2x00dev, RT3390)) {
1817 rt2x00dev->calibration[0] =
1818 rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x13);
1819 rt2x00dev->calibration[1] =
1820 rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x15);
1821 }
1685 1822
1686 /* 1823 /*
1687 * Set back to initial state 1824 * Set back to initial state
@@ -1699,6 +1836,81 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
1699 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0); 1836 rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
1700 rt2800_bbp_write(rt2x00dev, 4, bbp); 1837 rt2800_bbp_write(rt2x00dev, 4, bbp);
1701 1838
1839 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
1840 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
1841 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
1842 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E))
1843 rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
1844
1845 rt2800_register_read(rt2x00dev, OPT_14_CSR, &reg);
1846 rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1);
1847 rt2800_register_write(rt2x00dev, OPT_14_CSR, reg);
1848
1849 rt2800_rfcsr_read(rt2x00dev, 17, &rfcsr);
1850 rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0);
1851 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
1852 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
1853 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
1854 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);
1855 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
1856 rt2x00_set_field8(&rfcsr, RFCSR17_R, 1);
1857 }
1858 rt2x00_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG, &eeprom);
1859 if (rt2x00_get_field16(eeprom, EEPROM_TXMIXER_GAIN_BG_VAL) >= 1)
1860 rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN,
1861 rt2x00_get_field16(eeprom,
1862 EEPROM_TXMIXER_GAIN_BG_VAL));
1863 rt2800_rfcsr_write(rt2x00dev, 17, rfcsr);
1864
1865 if (rt2x00_rt(rt2x00dev, RT3090)) {
1866 rt2800_bbp_read(rt2x00dev, 138, &bbp);
1867
1868 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
1869 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1)
1870 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
1871 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1)
1872 rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1);
1873
1874 rt2800_bbp_write(rt2x00dev, 138, bbp);
1875 }
1876
1877 if (rt2x00_rt(rt2x00dev, RT3071) ||
1878 rt2x00_rt(rt2x00dev, RT3090) ||
1879 rt2x00_rt(rt2x00dev, RT3390)) {
1880 rt2800_rfcsr_read(rt2x00dev, 1, &rfcsr);
1881 rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1);
1882 rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0);
1883 rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0);
1884 rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1);
1885 rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1);
1886 rt2800_rfcsr_write(rt2x00dev, 1, rfcsr);
1887
1888 rt2800_rfcsr_read(rt2x00dev, 15, &rfcsr);
1889 rt2x00_set_field8(&rfcsr, RFCSR15_TX_LO2_EN, 0);
1890 rt2800_rfcsr_write(rt2x00dev, 15, rfcsr);
1891
1892 rt2800_rfcsr_read(rt2x00dev, 20, &rfcsr);
1893 rt2x00_set_field8(&rfcsr, RFCSR20_RX_LO1_EN, 0);
1894 rt2800_rfcsr_write(rt2x00dev, 20, rfcsr);
1895
1896 rt2800_rfcsr_read(rt2x00dev, 21, &rfcsr);
1897 rt2x00_set_field8(&rfcsr, RFCSR21_RX_LO2_EN, 0);
1898 rt2800_rfcsr_write(rt2x00dev, 21, rfcsr);
1899 }
1900
1901 if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071)) {
1902 rt2800_rfcsr_read(rt2x00dev, 27, &rfcsr);
1903 if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) ||
1904 rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
1905 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3);
1906 else
1907 rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0);
1908 rt2x00_set_field8(&rfcsr, RFCSR27_R2, 0);
1909 rt2x00_set_field8(&rfcsr, RFCSR27_R3, 0);
1910 rt2x00_set_field8(&rfcsr, RFCSR27_R4, 0);
1911 rt2800_rfcsr_write(rt2x00dev, 27, rfcsr);
1912 }
1913
1702 return 0; 1914 return 0;
1703} 1915}
1704EXPORT_SYMBOL_GPL(rt2800_init_rfcsr); 1916EXPORT_SYMBOL_GPL(rt2800_init_rfcsr);
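Note: the rewritten rt2800_init_rfcsr above settles into one pattern throughout: a per-chipset RFCSR value table first, then quirks gated on silicon revision rather than on chipset alone. A minimal sketch of that pattern (illustrative only, using the rt2x00_rt/rt2x00_rt_rev_lt helpers this series introduces, not the literal driver code):

    if (rt2x00_rt(rt2x00dev, RT3070)) {
            /* base RFCSR table for this chipset */
            rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
            /* ... */
    } else if (rt2x00_rt(rt2x00dev, RT3390)) {
            /* different base table, same register interface */
            rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
            /* ... */
    }

    /* quirks apply only to early silicon revisions */
    if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F))
            rt2800_rfcsr_write(rt2x00dev, 27, 0x03);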
@@ -1775,9 +1987,7 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
1775 } else if (rt2x00_rt(rt2x00dev, RT2860) || 1987 } else if (rt2x00_rt(rt2x00dev, RT2860) ||
1776 rt2x00_rt(rt2x00dev, RT2870) || 1988 rt2x00_rt(rt2x00dev, RT2870) ||
1777 rt2x00_rt(rt2x00dev, RT2872) || 1989 rt2x00_rt(rt2x00dev, RT2872) ||
1778 rt2x00_rt(rt2x00dev, RT2880) || 1990 rt2x00_rt(rt2x00dev, RT2872)) {
1779 (rt2x00_rt(rt2x00dev, RT2883) &&
1780 (rt2x00_rev(rt2x00dev) < RT2883_VERSION))) {
1781 /* 1991 /*
1782 * There is a max of 2 RX streams for RT28x0 series 1992 * There is a max of 2 RX streams for RT28x0 series
1783 */ 1993 */
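Note: the "max of 2 RX streams" comment refers to the clamp performed in the body of this branch, which the hunk's context does not show. A condensed sketch, assuming the usual validate_eeprom locals (word holds the EEPROM_ANTENNA value):

    rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
    if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2) {
            rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
            rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
    }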
@@ -1882,10 +2092,7 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
1882 if (!rt2x00_rt(rt2x00dev, RT2860) && 2092 if (!rt2x00_rt(rt2x00dev, RT2860) &&
1883 !rt2x00_rt(rt2x00dev, RT2870) && 2093 !rt2x00_rt(rt2x00dev, RT2870) &&
1884 !rt2x00_rt(rt2x00dev, RT2872) && 2094 !rt2x00_rt(rt2x00dev, RT2872) &&
1885 !rt2x00_rt(rt2x00dev, RT2880) &&
1886 !rt2x00_rt(rt2x00dev, RT2883) && 2095 !rt2x00_rt(rt2x00dev, RT2883) &&
1887 !rt2x00_rt(rt2x00dev, RT2890) &&
1888 !rt2x00_rt(rt2x00dev, RT3052) &&
1889 !rt2x00_rt(rt2x00dev, RT3070) && 2096 !rt2x00_rt(rt2x00dev, RT3070) &&
1890 !rt2x00_rt(rt2x00dev, RT3071) && 2097 !rt2x00_rt(rt2x00dev, RT3071) &&
1891 !rt2x00_rt(rt2x00dev, RT3090) && 2098 !rt2x00_rt(rt2x00dev, RT3090) &&
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index 91cce2d0f6db..2131f8f0c502 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -60,6 +60,12 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
60 unsigned int i; 60 unsigned int i;
61 u32 reg; 61 u32 reg;
62 62
63 /*
64 * SOC devices don't support MCU requests.
65 */
66 if (rt2x00_is_soc(rt2x00dev))
67 return;
68
63 for (i = 0; i < 200; i++) { 69 for (i = 0; i < 200; i++) {
64 rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg); 70 rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
65 71
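Note: with this guard, the MCU mailbox helpers in rt2800pci degrade to no-ops on the wireless-SoC platforms, which have no separate MCU behind the H2M mailbox. Each MCU entry point gains the same early return:

    if (rt2x00_is_soc(rt2x00dev))
            return;         /* no MCU on SoC, skip the mailbox entirely */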
@@ -341,19 +347,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
341 struct queue_entry_priv_pci *entry_priv; 347 struct queue_entry_priv_pci *entry_priv;
342 u32 reg; 348 u32 reg;
343 349
344 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
345 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
346 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
347 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
348 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
349 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
350 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
351 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
352 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
353
354 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
355 rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
356
357 /* 350 /*
358 * Initialize registers. 351 * Initialize registers.
359 */ 352 */
@@ -907,14 +900,12 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
907{ 900{
908 struct data_queue *queue; 901 struct data_queue *queue;
909 struct queue_entry *entry; 902 struct queue_entry *entry;
910 struct queue_entry *entry_done; 903 __le32 *txwi;
911 struct queue_entry_priv_pci *entry_priv;
912 struct txdone_entry_desc txdesc; 904 struct txdone_entry_desc txdesc;
913 u32 word; 905 u32 word;
914 u32 reg; 906 u32 reg;
915 u32 old_reg; 907 u32 old_reg;
916 unsigned int type; 908 int wcid, ack, pid, tx_wcid, tx_ack, tx_pid;
917 unsigned int index;
918 u16 mcs, real_mcs; 909 u16 mcs, real_mcs;
919 910
920 /* 911 /*
@@ -936,76 +927,89 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
936 break; 927 break;
937 old_reg = reg; 928 old_reg = reg;
938 929
930 wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID);
931 ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED);
932 pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE);
933
939 /* 934 /*
940 * Skip this entry when it contains an invalid 935 * Skip this entry when it contains an invalid
941 * queue identication number. 936 * queue identication number.
942 */ 937 */
943 type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1; 938 if (pid <= 0 || pid > QID_RX)
944 if (type >= QID_RX)
945 continue; 939 continue;
946 940
947 queue = rt2x00queue_get_queue(rt2x00dev, type); 941 queue = rt2x00queue_get_queue(rt2x00dev, pid - 1);
948 if (unlikely(!queue)) 942 if (unlikely(!queue))
949 continue; 943 continue;
950 944
951 /* 945 /*
952 * Skip this entry when it contains an invalid 946 * Inside each queue, we process each entry in a chronological
953 * index number. 947 * order. We first check that the queue is not empty.
954 */ 948 */
955 index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1; 949 if (rt2x00queue_empty(queue))
956 if (unlikely(index >= queue->limit))
957 continue; 950 continue;
951 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
958 952
959 entry = &queue->entries[index]; 953 /* Check if we got a match by looking at WCID/ACK/PID
960 entry_priv = entry->priv_data; 954 * fields */
961 rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word); 955 txwi = (__le32 *)(entry->skb->data -
956 rt2x00dev->ops->extra_tx_headroom);
962 957
963 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 958 rt2x00_desc_read(txwi, 1, &word);
964 while (entry != entry_done) { 959 tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID);
965 /* 960 tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK);
966 * Catch up. 961 tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID);
967 * Just report any entries we missed as failed.
968 */
969 WARNING(rt2x00dev,
970 "TX status report missed for entry %d\n",
971 entry_done->entry_idx);
972 962
973 txdesc.flags = 0; 963 if ((wcid != tx_wcid) || (ack != tx_ack) || (pid != tx_pid))
974 __set_bit(TXDONE_UNKNOWN, &txdesc.flags); 964 WARNING(rt2x00dev, "invalid TX_STA_FIFO content\n");
975 txdesc.retry = 0;
976
977 rt2x00lib_txdone(entry_done, &txdesc);
978 entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
979 }
980 965
981 /* 966 /*
982 * Obtain the status about this packet. 967 * Obtain the status about this packet.
983 */ 968 */
984 txdesc.flags = 0; 969 txdesc.flags = 0;
985 if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) 970 rt2x00_desc_read(txwi, 0, &word);
986 __set_bit(TXDONE_SUCCESS, &txdesc.flags); 971 mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
987 else 972 real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
988 __set_bit(TXDONE_FAILURE, &txdesc.flags);
989 973
990 /* 974 /*
991 * Ralink has a retry mechanism using a global fallback 975 * Ralink has a retry mechanism using a global fallback
992 * table. We setup this fallback table to try immediate 976 * table. We setup this fallback table to try the immediate
993 * lower rate for all rates. In the TX_STA_FIFO, 977 * lower rate for all rates. In the TX_STA_FIFO, the MCS field
994 * the MCS field contains the MCS used for the successfull 978 * always contains the MCS used for the last transmission, be
995 * transmission. If the first transmission succeed, 979 * it successful or not.
996 * we have mcs == tx_mcs. On the second transmission,
997 * we have mcs = tx_mcs - 1. So the number of
998 * retry is (tx_mcs - mcs).
999 */ 980 */
1000 mcs = rt2x00_get_field32(word, TXWI_W0_MCS); 981 if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS)) {
1001 real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS); 982 /*
983 * Transmission succeeded. The number of retries is
984 * mcs - real_mcs
985 */
986 __set_bit(TXDONE_SUCCESS, &txdesc.flags);
987 txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0);
988 } else {
989 /*
990 * Transmission failed. The number of retries is
991 * always 7 in this case (for a total number of 8
992 * frames sent).
993 */
994 __set_bit(TXDONE_FAILURE, &txdesc.flags);
995 txdesc.retry = 7;
996 }
997
1002 __set_bit(TXDONE_FALLBACK, &txdesc.flags); 998 __set_bit(TXDONE_FALLBACK, &txdesc.flags);
1003 txdesc.retry = mcs - min(mcs, real_mcs); 999
1004 1000
1005 rt2x00lib_txdone(entry, &txdesc); 1001 rt2x00lib_txdone(entry, &txdesc);
1006 } 1002 }
1007} 1003}
1008 1004
1005static void rt2800pci_wakeup(struct rt2x00_dev *rt2x00dev)
1006{
1007 struct ieee80211_conf conf = { .flags = 0 };
1008 struct rt2x00lib_conf libconf = { .conf = &conf };
1009
1010 rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
1011}
1012
1009static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance) 1013static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
1010{ 1014{
1011 struct rt2x00_dev *rt2x00dev = dev_instance; 1015 struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -1030,6 +1034,9 @@ static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
1030 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) 1034 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
1031 rt2800pci_txdone(rt2x00dev); 1035 rt2800pci_txdone(rt2x00dev);
1032 1036
1037 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP))
1038 rt2800pci_wakeup(rt2x00dev);
1039
1033 return IRQ_HANDLED; 1040 return IRQ_HANDLED;
1034} 1041}
1035 1042
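Note: the retry accounting above relies on the global fallback table stepping down exactly one MCS per retry, so the MCS reported in TX_STA_FIFO is the one used on the final attempt. A standalone sketch of the arithmetic:

    /* retries derived from the MCS written into the TXWI (txwi_mcs)
     * and the MCS reported by TX_STA_FIFO (fifo_mcs) */
    static int tx_retry_count(u16 txwi_mcs, u16 fifo_mcs, bool success)
    {
            if (!success)
                    return 7;       /* 8 attempts in total, all failed */
            return (txwi_mcs > fifo_mcs) ? txwi_mcs - fifo_mcs : 0;
    }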
@@ -1184,6 +1191,7 @@ static const struct rt2x00_ops rt2800pci_ops = {
1184/* 1191/*
1185 * RT2800pci module information. 1192 * RT2800pci module information.
1186 */ 1193 */
1194#ifdef CONFIG_RT2800PCI_PCI
1187static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { 1195static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1188 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1196 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
1189 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1197 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1208,9 +1216,11 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1208 { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1216 { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
1209 { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1217 { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
1210 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1218 { PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
1219 { PCI_DEVICE(0x1814, 0x3593), PCI_DEVICE_DATA(&rt2800pci_ops) },
1211#endif 1220#endif
1212 { 0, } 1221 { 0, }
1213}; 1222};
1223#endif /* CONFIG_RT2800PCI_PCI */
1214 1224
1215MODULE_AUTHOR(DRV_PROJECT); 1225MODULE_AUTHOR(DRV_PROJECT);
1216MODULE_VERSION(DRV_VERSION); 1226MODULE_VERSION(DRV_VERSION);
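Note: the whole PCI id table, including its terminating entry, now lives inside CONFIG_RT2800PCI_PCI, so an rt2800pci module built only for SoC platforms carries no PCI table at all. Simplified shape after this hunk (the inner config sections are omitted here):

    #ifdef CONFIG_RT2800PCI_PCI
    static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
            { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
            /* ... */
            { 0, }
    };
    #endif /* CONFIG_RT2800PCI_PCI */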
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index d27d7d5d850c..6b809ab42c61 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -876,6 +876,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
876 { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) }, 876 { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
877 { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) }, 877 { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
878 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) }, 878 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
879 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
880 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
879 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 881 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
880 /* SMC */ 882 /* SMC */
881 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) }, 883 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -905,8 +907,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
905 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 907 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
906 /* AirTies */ 908 /* AirTies */
907 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) }, 909 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
910 /* ASUS */
911 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
908 /* AzureWave */ 912 /* AzureWave */
909 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) }, 913 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
914 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
915 { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
916 { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
910 /* Conceptronic */ 917 /* Conceptronic */
911 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) }, 918 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
912 /* Corega */ 919 /* Corega */
@@ -916,20 +923,46 @@ static struct usb_device_id rt2800usb_device_table[] = {
916 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) }, 923 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
917 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) }, 924 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
918 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) }, 925 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
926 { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
927 /* Draytek */
928 { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
919 /* Edimax */ 929 /* Edimax */
920 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) }, 930 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
921 /* Encore */ 931 /* Encore */
922 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) }, 932 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
933 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
923 /* EnGenius */ 934 /* EnGenius */
924 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) }, 935 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
925 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) }, 936 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
926 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) }, 937 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
938 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
939 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
940 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
927 /* Gigabyte */ 941 /* Gigabyte */
928 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) }, 942 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
929 /* I-O DATA */ 943 /* I-O DATA */
930 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) }, 944 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
945 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
946 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
947 /* Logitec */
948 { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
931 /* MSI */ 949 /* MSI */
932 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) }, 950 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
951 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
952 { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
953 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
954 { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
955 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
956 { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
957 { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
958 { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
959 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
960 { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
961 { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
962 { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
963 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
964 /* Para */
965 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
933 /* Pegatron */ 966 /* Pegatron */
934 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) }, 967 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
935 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) }, 968 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -944,9 +977,15 @@ static struct usb_device_id rt2800usb_device_table[] = {
944 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 977 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
945 /* Sitecom */ 978 /* Sitecom */
946 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) }, 979 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
980 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
947 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) }, 981 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
982 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
983 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
948 /* SMC */ 984 /* SMC */
949 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) }, 985 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
986 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
987 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
988 { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
950 /* Zinwell */ 989 /* Zinwell */
951 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) }, 990 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
952 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) }, 991 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -966,6 +1005,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
966 { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) }, 1005 { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
967 /* Sitecom */ 1006 /* Sitecom */
968 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, 1007 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
1008 { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
969 /* Zinwell */ 1009 /* Zinwell */
970 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) }, 1010 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
971#endif 1011#endif
@@ -985,18 +1025,14 @@ static struct usb_device_id rt2800usb_device_table[] = {
985 /* Amigo */ 1025 /* Amigo */
986 { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, 1026 { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
987 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) }, 1027 { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
988 /* Askey */
989 { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
990 /* ASUS */ 1028 /* ASUS */
991 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) }, 1029 { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
992 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) }, 1030 { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
993 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
994 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) }, 1031 { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
995 { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) }, 1032 { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
996 /* AzureWave */ 1033 /* AzureWave */
997 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) }, 1034 { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
998 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) }, 1035 { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
999 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
1000 /* Belkin */ 1036 /* Belkin */
1001 { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) }, 1037 { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
1002 /* Buffalo */ 1038 /* Buffalo */
@@ -1015,14 +1051,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1015 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) }, 1051 { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
1016 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) }, 1052 { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
1017 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) }, 1053 { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
1018 { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
1019 /* Encore */ 1054 /* Encore */
1020 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) }, 1055 { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
1021 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
1022 /* EnGenius */
1023 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
1024 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
1025 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
1026 /* Gemtek */ 1056 /* Gemtek */
1027 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) }, 1057 { USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
1028 /* Gigabyte */ 1058 /* Gigabyte */
@@ -1030,9 +1060,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
1030 /* Hawking */ 1060 /* Hawking */
1031 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) }, 1061 { USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
1032 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) }, 1062 { USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
1033 /* I-O DATA */
1034 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
1035 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
1036 /* LevelOne */ 1063 /* LevelOne */
1037 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) }, 1064 { USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
1038 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) }, 1065 { USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -1042,20 +1069,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1042 { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) }, 1069 { USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
1043 /* Motorola */ 1070 /* Motorola */
1044 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) }, 1071 { USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
1045 /* MSI */
1046 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
1047 { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
1048 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
1049 { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
1050 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
1051 { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
1052 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
1053 { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
1054 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
1055 /* Ovislink */ 1072 /* Ovislink */
1056 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) }, 1073 { USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
1057 /* Para */
1058 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
1059 /* Pegatron */ 1074 /* Pegatron */
1060 { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) }, 1075 { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
1061 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) }, 1076 { USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -1064,19 +1079,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
1064 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) }, 1079 { USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
1065 /* Qcom */ 1080 /* Qcom */
1066 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) }, 1081 { USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
1067 /* Sitecom */
1068 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
1069 { USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
1070 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
1071 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
1072 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
1073 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
1074 { USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
1075 { USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
1076 /* SMC */ 1082 /* SMC */
1077 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) }, 1083 { USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
1078 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
1079 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
1080 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) }, 1084 { USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
1081 { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) }, 1085 { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) },
1082 /* Sweex */ 1086 /* Sweex */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index d9daa9c406fa..4de505b98331 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -177,16 +177,15 @@ struct rt2x00_chip {
177#define RT2573 0x2573 177#define RT2573 0x2573
178#define RT2860 0x2860 /* 2.4GHz PCI/CB */ 178#define RT2860 0x2860 /* 2.4GHz PCI/CB */
179#define RT2870 0x2870 179#define RT2870 0x2870
180#define RT2872 0x2872 180#define RT2872 0x2872 /* WSOC */
181#define RT2880 0x2880 /* WSOC */
182#define RT2883 0x2883 /* WSOC */ 181#define RT2883 0x2883 /* WSOC */
183#define RT2890 0x2890 /* 2.4GHz PCIe */
184#define RT3052 0x3052 /* WSOC */
185#define RT3070 0x3070 182#define RT3070 0x3070
186#define RT3071 0x3071 183#define RT3071 0x3071
187#define RT3090 0x3090 /* 2.4GHz PCIe */ 184#define RT3090 0x3090 /* 2.4GHz PCIe */
188#define RT3390 0x3390 185#define RT3390 0x3390
189#define RT3572 0x3572 186#define RT3572 0x3572
187#define RT3593 0x3593 /* PCIe */
188#define RT3883 0x3883 /* WSOC */
190 189
191 u16 rf; 190 u16 rf;
192 u16 rev; 191 u16 rev;
@@ -930,12 +929,12 @@ static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
930 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev); 929 rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
931} 930}
932 931
933static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt) 932static inline bool rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
934{ 933{
935 return (rt2x00dev->chip.rt == rt); 934 return (rt2x00dev->chip.rt == rt);
936} 935}
937 936
938static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf) 937static inline bool rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
939{ 938{
940 return (rt2x00dev->chip.rf == rf); 939 return (rt2x00dev->chip.rf == rf);
941} 940}
@@ -945,6 +944,24 @@ static inline u16 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
945 return rt2x00dev->chip.rev; 944 return rt2x00dev->chip.rev;
946} 945}
947 946
947static inline bool rt2x00_rt_rev(struct rt2x00_dev *rt2x00dev,
948 const u16 rt, const u16 rev)
949{
950 return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) == rev);
951}
952
953static inline bool rt2x00_rt_rev_lt(struct rt2x00_dev *rt2x00dev,
954 const u16 rt, const u16 rev)
955{
956 return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) < rev);
957}
958
959static inline bool rt2x00_rt_rev_gte(struct rt2x00_dev *rt2x00dev,
960 const u16 rt, const u16 rev)
961{
962 return (rt2x00_rt(rt2x00dev, rt) && rt2x00_rev(rt2x00dev) >= rev);
963}
964
948static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev, 965static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
949 enum rt2x00_chip_intf intf) 966 enum rt2x00_chip_intf intf)
950{ 967{
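Note: these three inline helpers fold the common "right chipset and right revision" test into a single call; the rt2800lib.c hunks earlier in this diff are their first users. Typical usage:

    if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E))
            /* workaround for pre-E RT3071 silicon */;

    if (rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E))
            /* behaviour for RT3090 revision E and later */;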
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 432e75f960b7..b9885981f3a8 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2118,6 +2118,14 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2118 } 2118 }
2119} 2119}
2120 2120
2121static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
2122{
2123 struct ieee80211_conf conf = { .flags = 0 };
2124 struct rt2x00lib_conf libconf = { .conf = &conf };
2125
2126 rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
2127}
2128
2121static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) 2129static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2122{ 2130{
2123 struct rt2x00_dev *rt2x00dev = dev_instance; 2131 struct rt2x00_dev *rt2x00dev = dev_instance;
@@ -2165,6 +2173,12 @@ static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance)
2165 rt2x00pci_register_write(rt2x00dev, 2173 rt2x00pci_register_write(rt2x00dev,
2166 M2H_CMD_DONE_CSR, 0xffffffff); 2174 M2H_CMD_DONE_CSR, 0xffffffff);
2167 2175
2176 /*
2177 * 4 - MCU Autowakeup interrupt.
2178 */
2179 if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP))
2180 rt61pci_wakeup(rt2x00dev);
2181
2168 return IRQ_HANDLED; 2182 return IRQ_HANDLED;
2169} 2183}
2170 2184
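Note: rt61pci_wakeup, like the rt2800pci variant added above, answers the MCU TWAKEUP interrupt by re-running the driver's power-save configuration with an empty flags word, i.e. IEEE80211_CONF_PS cleared, which forces the hardware fully awake. The whole handler reduces to:

    struct ieee80211_conf conf = { .flags = 0 };    /* PS bit clear */
    struct rt2x00lib_conf libconf = { .conf = &conf };

    rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);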
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index bb58d797fb72..576ea9dd2824 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -861,15 +861,15 @@ static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev,
861 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, 861 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
862 USB_MODE_SLEEP, REGISTER_TIMEOUT); 862 USB_MODE_SLEEP, REGISTER_TIMEOUT);
863 } else { 863 } else {
864 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
865 USB_MODE_WAKEUP, REGISTER_TIMEOUT);
866
867 rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg); 864 rt2x00usb_register_read(rt2x00dev, MAC_CSR11, &reg);
868 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0); 865 rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0);
869 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0); 866 rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0);
870 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0); 867 rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0);
871 rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0); 868 rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0);
872 rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); 869 rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg);
870
871 rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
872 USB_MODE_WAKEUP, REGISTER_TIMEOUT);
873 } 873 }
874} 874}
875 875
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 2131a442831a..6b46329b732f 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -724,10 +724,10 @@ static void rtl8180_bss_info_changed(struct ieee80211_hw *dev,
724 priv->rf->conf_erp(dev, info); 724 priv->rf->conf_erp(dev, info);
725} 725}
726 726
727static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev, int mc_count, 727static u64 rtl8180_prepare_multicast(struct ieee80211_hw *dev,
728 struct dev_addr_list *mc_list) 728 struct netdev_hw_addr_list *mc_list)
729{ 729{
730 return mc_count; 730 return netdev_hw_addr_list_count(mc_list);
731} 731}
732 732
733static void rtl8180_configure_filter(struct ieee80211_hw *dev, 733static void rtl8180_configure_filter(struct ieee80211_hw *dev,
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 1d30792973f5..738921fda027 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1194,9 +1194,9 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
1194} 1194}
1195 1195
1196static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev, 1196static u64 rtl8187_prepare_multicast(struct ieee80211_hw *dev,
1197 int mc_count, struct dev_addr_list *mc_list) 1197 struct netdev_hw_addr_list *mc_list)
1198{ 1198{
1199 return mc_count; 1199 return netdev_hw_addr_list_count(mc_list);
1200} 1200}
1201 1201
1202static void rtl8187_configure_filter(struct ieee80211_hw *dev, 1202static void rtl8187_configure_filter(struct ieee80211_hw *dev,
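Note: both rtl8180 and rtl8187 are converted here for the mac80211 API change that replaced the (mc_count, struct dev_addr_list *) pair with a single struct netdev_hw_addr_list. A driver that only needs the count now derives it from the list:

    static u64 prepare_multicast(struct ieee80211_hw *hw,
                                 struct netdev_hw_addr_list *mc_list)
    {
            return netdev_hw_addr_list_count(mc_list);
    }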
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 785e0244e305..337fc7bec5a5 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -51,3 +51,27 @@ config WL1271
51 51
52 If you choose to build a module, it'll be called wl1271. Say N if 52 If you choose to build a module, it'll be called wl1271. Say N if
53 unsure. 53 unsure.
54
55config WL1271_SPI
56 tristate "TI wl1271 SPI support"
57 depends on WL1271 && SPI_MASTER
58 ---help---
59 This module adds support for the SPI interface of adapters using
60 TI wl1271 chipset. Select this if your platform is using
61 the SPI bus.
62
63 If you choose to build a module, it'll be called wl1271_spi.

64 Say N if unsure.
65
66config WL1271_SDIO
67 tristate "TI wl1271 SDIO support"
68 depends on WL1271 && MMC && ARM
69 ---help---
70 This module adds support for the SDIO interface of adapters using
71 TI wl1271 chipset. Select this if your platform is using
72 the SDIO bus.
73
74 If you choose to build a module, it'll be called
75 wl1271_sdio. Say N if unsure.
76
77
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index f47ec94c16dc..27ddd2be0a91 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -7,10 +7,12 @@ obj-$(CONFIG_WL1251) += wl1251.o
7obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o 7obj-$(CONFIG_WL1251_SPI) += wl1251_spi.o
8obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o 8obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
9 9
10wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \ 10wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \
11 wl1271_event.o wl1271_tx.o wl1271_rx.o \ 11 wl1271_event.o wl1271_tx.o wl1271_rx.o \
12 wl1271_ps.o wl1271_acx.o wl1271_boot.o \ 12 wl1271_ps.o wl1271_acx.o wl1271_boot.o \
13 wl1271_init.o wl1271_debugfs.o wl1271_io.o 13 wl1271_init.o wl1271_debugfs.o
14 14
15wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o 15wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
16obj-$(CONFIG_WL1271) += wl1271.o 16obj-$(CONFIG_WL1271) += wl1271.o
17obj-$(CONFIG_WL1271_SPI) += wl1271_spi.o
18obj-$(CONFIG_WL1271_SDIO) += wl1271_sdio.o
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 37c61c19cae5..4f5f02a26e62 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -256,6 +256,8 @@ struct wl1251_debugfs {
256struct wl1251_if_operations { 256struct wl1251_if_operations {
257 void (*read)(struct wl1251 *wl, int addr, void *buf, size_t len); 257 void (*read)(struct wl1251 *wl, int addr, void *buf, size_t len);
258 void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len); 258 void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len);
259 void (*read_elp)(struct wl1251 *wl, int addr, u32 *val);
260 void (*write_elp)(struct wl1251 *wl, int addr, u32 val);
259 void (*reset)(struct wl1251 *wl); 261 void (*reset)(struct wl1251 *wl);
260 void (*enable_irq)(struct wl1251 *wl); 262 void (*enable_irq)(struct wl1251 *wl);
261 void (*disable_irq)(struct wl1251 *wl); 263 void (*disable_irq)(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_boot.c b/drivers/net/wireless/wl12xx/wl1251_boot.c
index d5ac79aeaa73..2545123931e8 100644
--- a/drivers/net/wireless/wl12xx/wl1251_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1251_boot.c
@@ -497,7 +497,8 @@ int wl1251_boot(struct wl1251 *wl)
497 /* 2. start processing NVS file */ 497 /* 2. start processing NVS file */
498 if (wl->use_eeprom) { 498 if (wl->use_eeprom) {
499 wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR); 499 wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR);
500 msleep(4000); 500 /* Wait for EEPROM NVS burst read to complete */
501 msleep(40);
501 wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM); 502 wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM);
502 } else { 503 } else {
503 ret = wl1251_boot_upload_nvs(wl); 504 ret = wl1251_boot_upload_nvs(wl);
diff --git a/drivers/net/wireless/wl12xx/wl1251_io.h b/drivers/net/wireless/wl12xx/wl1251_io.h
index b89d2ac62efb..c545e9d5f512 100644
--- a/drivers/net/wireless/wl12xx/wl1251_io.h
+++ b/drivers/net/wireless/wl12xx/wl1251_io.h
@@ -48,6 +48,26 @@ static inline void wl1251_write32(struct wl1251 *wl, int addr, u32 val)
48 wl->if_ops->write(wl, addr, &val, sizeof(u32)); 48 wl->if_ops->write(wl, addr, &val, sizeof(u32));
49} 49}
50 50
51static inline u32 wl1251_read_elp(struct wl1251 *wl, int addr)
52{
53 u32 response;
54
55 if (wl->if_ops->read_elp)
56 wl->if_ops->read_elp(wl, addr, &response);
57 else
58 wl->if_ops->read(wl, addr, &response, sizeof(u32));
59
60 return response;
61}
62
63static inline void wl1251_write_elp(struct wl1251 *wl, int addr, u32 val)
64{
65 if (wl->if_ops->write_elp)
66 wl->if_ops->write_elp(wl, addr, val);
67 else
68 wl->if_ops->write(wl, addr, &val, sizeof(u32));
69}
70
51/* Memory target IO, address is translated to partition 0 */ 71/* Memory target IO, address is translated to partition 0 */
52void wl1251_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len); 72void wl1251_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len);
53void wl1251_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len); 73void wl1251_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len);
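Note: the two wrappers above route ELP register accesses through the optional read_elp/write_elp hooks and silently fall back to the generic read/write ops for buses that do not provide them, so call sites stay bus-agnostic. The wl1251_main.c and wl1251_ps.c hunks below convert the ELP users accordingly, e.g.:

    wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
    elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
    if (!(elp_reg & ELPCTRL_WLAN_READY))
            wl1251_warning("WLAN not ready");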
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 1c8226eee409..4d479708158d 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -147,8 +147,8 @@ static void wl1251_fw_wakeup(struct wl1251 *wl)
147 u32 elp_reg; 147 u32 elp_reg;
148 148
149 elp_reg = ELPCTRL_WAKE_UP; 149 elp_reg = ELPCTRL_WAKE_UP;
150 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); 150 wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
151 elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); 151 elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
152 152
153 if (!(elp_reg & ELPCTRL_WLAN_READY)) 153 if (!(elp_reg & ELPCTRL_WLAN_READY))
154 wl1251_warning("WLAN not ready"); 154 wl1251_warning("WLAN not ready");
@@ -202,8 +202,8 @@ static int wl1251_chip_wakeup(struct wl1251 *wl)
202 goto out; 202 goto out;
203 } 203 }
204 204
205 /* No NVS from netlink, try to get it from the filesystem */ 205 if (wl->nvs == NULL && !wl->use_eeprom) {
206 if (wl->nvs == NULL) { 206 /* No NVS from netlink, try to get it from the filesystem */
207 ret = wl1251_fetch_nvs(wl); 207 ret = wl1251_fetch_nvs(wl);
208 if (ret < 0) 208 if (ret < 0)
209 goto out; 209 goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 851dfb65e474..b55cb2bd459a 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -45,7 +45,7 @@ void wl1251_elp_work(struct work_struct *work)
45 goto out; 45 goto out;
46 46
47 wl1251_debug(DEBUG_PSM, "chip to elp"); 47 wl1251_debug(DEBUG_PSM, "chip to elp");
48 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP); 48 wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
49 wl->elp = true; 49 wl->elp = true;
50 50
51out: 51out:
@@ -79,9 +79,9 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
79 start = jiffies; 79 start = jiffies;
80 timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT); 80 timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
81 81
82 wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); 82 wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
83 83
84 elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); 84 elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
85 85
86 /* 86 /*
87 * FIXME: we should wait for irq from chip but, as a temporary 87 * FIXME: we should wait for irq from chip but, as a temporary
@@ -93,7 +93,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
93 return -ETIMEDOUT; 93 return -ETIMEDOUT;
94 } 94 }
95 msleep(1); 95 msleep(1);
96 elp_reg = wl1251_read32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR); 96 elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
97 } 97 }
98 98
99 wl1251_debug(DEBUG_PSM, "wakeup time: %u ms", 99 wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
diff --git a/drivers/net/wireless/wl12xx/wl1251_sdio.c b/drivers/net/wireless/wl12xx/wl1251_sdio.c
index 9423f22bdced..2051ef06e9ec 100644
--- a/drivers/net/wireless/wl12xx/wl1251_sdio.c
+++ b/drivers/net/wireless/wl12xx/wl1251_sdio.c
@@ -20,20 +20,11 @@
20 * Copyright (C) 2009 Bob Copeland (me@bobcopeland.com) 20 * Copyright (C) 2009 Bob Copeland (me@bobcopeland.com)
21 */ 21 */
22#include <linux/module.h> 22#include <linux/module.h>
23#include <linux/crc7.h>
24#include <linux/mod_devicetable.h> 23#include <linux/mod_devicetable.h>
25#include <linux/irq.h>
26#include <linux/mmc/sdio_func.h> 24#include <linux/mmc/sdio_func.h>
27#include <linux/mmc/sdio_ids.h> 25#include <linux/mmc/sdio_ids.h>
28#include <linux/platform_device.h>
29 26
30#include "wl1251.h" 27#include "wl1251.h"
31#include "wl12xx_80211.h"
32#include "wl1251_reg.h"
33#include "wl1251_ps.h"
34#include "wl1251_io.h"
35#include "wl1251_tx.h"
36#include "wl1251_debugfs.h"
37 28
38#ifndef SDIO_VENDOR_ID_TI 29#ifndef SDIO_VENDOR_ID_TI
39#define SDIO_VENDOR_ID_TI 0x104c 30#define SDIO_VENDOR_ID_TI 0x104c
@@ -65,7 +56,8 @@ static const struct sdio_device_id wl1251_devices[] = {
65MODULE_DEVICE_TABLE(sdio, wl1251_devices); 56MODULE_DEVICE_TABLE(sdio, wl1251_devices);
66 57
67 58
68void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len) 59static void wl1251_sdio_read(struct wl1251 *wl, int addr,
60 void *buf, size_t len)
69{ 61{
70 int ret; 62 int ret;
71 struct sdio_func *func = wl_to_func(wl); 63 struct sdio_func *func = wl_to_func(wl);
@@ -77,7 +69,8 @@ void wl1251_sdio_read(struct wl1251 *wl, int addr, void *buf, size_t len)
77 sdio_release_host(func); 69 sdio_release_host(func);
78} 70}
79 71
80void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len) 72static void wl1251_sdio_write(struct wl1251 *wl, int addr,
73 void *buf, size_t len)
81{ 74{
82 int ret; 75 int ret;
83 struct sdio_func *func = wl_to_func(wl); 76 struct sdio_func *func = wl_to_func(wl);
@@ -89,7 +82,33 @@ void wl1251_sdio_write(struct wl1251 *wl, int addr, void *buf, size_t len)
89 sdio_release_host(func); 82 sdio_release_host(func);
90} 83}
91 84
92void wl1251_sdio_reset(struct wl1251 *wl) 85static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
86{
87 int ret = 0;
88 struct sdio_func *func = wl_to_func(wl);
89
90 sdio_claim_host(func);
91 *val = sdio_readb(func, addr, &ret);
92 sdio_release_host(func);
93
94 if (ret)
95 wl1251_error("sdio_readb failed (%d)", ret);
96}
97
98static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
99{
100 int ret = 0;
101 struct sdio_func *func = wl_to_func(wl);
102
103 sdio_claim_host(func);
104 sdio_writeb(func, val, addr, &ret);
105 sdio_release_host(func);
106
107 if (ret)
108 wl1251_error("sdio_writeb failed (%d)", ret);
109}
110
111static void wl1251_sdio_reset(struct wl1251 *wl)
93{ 112{
94} 113}
95 114
@@ -111,19 +130,22 @@ static void wl1251_sdio_disable_irq(struct wl1251 *wl)
111 sdio_release_host(func); 130 sdio_release_host(func);
112} 131}
113 132
114void wl1251_sdio_set_power(bool enable) 133static void wl1251_sdio_set_power(bool enable)
115{ 134{
116} 135}
117 136
118struct wl1251_if_operations wl1251_sdio_ops = { 137static const struct wl1251_if_operations wl1251_sdio_ops = {
119 .read = wl1251_sdio_read, 138 .read = wl1251_sdio_read,
120 .write = wl1251_sdio_write, 139 .write = wl1251_sdio_write,
140 .write_elp = wl1251_sdio_write_elp,
141 .read_elp = wl1251_sdio_read_elp,
121 .reset = wl1251_sdio_reset, 142 .reset = wl1251_sdio_reset,
122 .enable_irq = wl1251_sdio_enable_irq, 143 .enable_irq = wl1251_sdio_enable_irq,
123 .disable_irq = wl1251_sdio_disable_irq, 144 .disable_irq = wl1251_sdio_disable_irq,
124}; 145};
125 146
126int wl1251_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) 147static int wl1251_sdio_probe(struct sdio_func *func,
148 const struct sdio_device_id *id)
127{ 149{
128 int ret; 150 int ret;
129 struct wl1251 *wl; 151 struct wl1251 *wl;
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 3bfb59bd4635..e81474203a23 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -310,7 +310,7 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
310 310
311static struct spi_driver wl1251_spi_driver = { 311static struct spi_driver wl1251_spi_driver = {
312 .driver = { 312 .driver = {
313 .name = "wl1251", 313 .name = DRIVER_NAME,
314 .bus = &spi_bus_type, 314 .bus = &spi_bus_type,
315 .owner = THIS_MODULE, 315 .owner = THIS_MODULE,
316 }, 316 },
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 97ea5096bc8c..75887e74205b 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -53,6 +53,9 @@ enum {
53 DEBUG_MAC80211 = BIT(11), 53 DEBUG_MAC80211 = BIT(11),
54 DEBUG_CMD = BIT(12), 54 DEBUG_CMD = BIT(12),
55 DEBUG_ACX = BIT(13), 55 DEBUG_ACX = BIT(13),
56 DEBUG_SDIO = BIT(14),
57 DEBUG_FILTERS = BIT(15),
58 DEBUG_ADHOC = BIT(16),
56 DEBUG_ALL = ~0, 59 DEBUG_ALL = ~0,
57}; 60};
58 61
@@ -110,6 +113,9 @@ enum {
110#define WL1271_FW_NAME "wl1271-fw.bin" 113#define WL1271_FW_NAME "wl1271-fw.bin"
111#define WL1271_NVS_NAME "wl1271-nvs.bin" 114#define WL1271_NVS_NAME "wl1271-nvs.bin"
112 115
116#define WL1271_TX_SECURITY_LO16(s) ((u16)((s) & 0xffff))
117#define WL1271_TX_SECURITY_HI32(s) ((u32)(((s) >> 16) & 0xffffffff))
118
113/* NVS data structure */ 119/* NVS data structure */
114#define WL1271_NVS_SECTION_SIZE 468 120#define WL1271_NVS_SECTION_SIZE 468
115 121
@@ -142,14 +148,7 @@ struct wl1271_nvs_file {
142 */ 148 */
143#undef WL1271_80211A_ENABLED 149#undef WL1271_80211A_ENABLED
144 150
145/* 151#define WL1271_BUSY_WORD_CNT 1
146 * FIXME: for the wl1271, a busy word count of 1 here will result in a more
147 * optimal SPI interface. There is some SPI bug however, causing RXS time outs
148 * with this mode occasionally on boot, so lets have three for now. A value of
149 * three should make sure, that the chipset will always be ready, though this
150 * will impact throughput and latencies slightly.
151 */
152#define WL1271_BUSY_WORD_CNT 3
153#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32)) 152#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
154 153
155#define WL1271_ELP_HW_STATE_ASLEEP 0 154#define WL1271_ELP_HW_STATE_ASLEEP 0
@@ -334,11 +333,27 @@ struct wl1271_scan {
334 u8 probe_requests; 333 u8 probe_requests;
335}; 334};
336 335
336struct wl1271_if_operations {
337 void (*read)(struct wl1271 *wl, int addr, void *buf, size_t len,
338 bool fixed);
339 void (*write)(struct wl1271 *wl, int addr, void *buf, size_t len,
340 bool fixed);
341 void (*reset)(struct wl1271 *wl);
342 void (*init)(struct wl1271 *wl);
343 void (*power)(struct wl1271 *wl, bool enable);
344 struct device* (*dev)(struct wl1271 *wl);
345 void (*enable_irq)(struct wl1271 *wl);
346 void (*disable_irq)(struct wl1271 *wl);
347};
348
337struct wl1271 { 349struct wl1271 {
350 struct platform_device *plat_dev;
338 struct ieee80211_hw *hw; 351 struct ieee80211_hw *hw;
339 bool mac80211_registered; 352 bool mac80211_registered;
340 353
341 struct spi_device *spi; 354 void *if_priv;
355
356 struct wl1271_if_operations *if_ops;
342 357
343 void (*set_power)(bool enable); 358 void (*set_power)(bool enable);
344 int irq; 359 int irq;
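Note: struct wl1271_if_operations gives wl1271 the same bus abstraction wl1251 already has: the core holds an opaque if_priv and calls through if_ops, so struct spi_device drops out of the core entirely. A hypothetical bus backend (names are illustrative, not from this patch) would fill the table like:

    static struct wl1271_if_operations example_bus_ops = {
            .read           = example_bus_read,     /* hypothetical */
            .write          = example_bus_write,    /* hypothetical */
            .reset          = example_bus_reset,
            .init           = example_bus_init,
            .power          = example_bus_power,
            .dev            = example_bus_dev,
            .enable_irq     = example_bus_enable_irq,
            .disable_irq    = example_bus_disable_irq,
    };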
@@ -357,6 +372,9 @@ struct wl1271 {
357#define WL1271_FLAG_IN_ELP (6) 372#define WL1271_FLAG_IN_ELP (6)
358#define WL1271_FLAG_PSM (7) 373#define WL1271_FLAG_PSM (7)
359#define WL1271_FLAG_PSM_REQUESTED (8) 374#define WL1271_FLAG_PSM_REQUESTED (8)
375#define WL1271_FLAG_IRQ_PENDING (9)
376#define WL1271_FLAG_IRQ_RUNNING (10)
377#define WL1271_FLAG_IDLE (11)
360 unsigned long flags; 378 unsigned long flags;
361 379
362 struct wl1271_partition_set part; 380 struct wl1271_partition_set part;
@@ -373,6 +391,7 @@ struct wl1271 {
373 u8 bssid[ETH_ALEN]; 391 u8 bssid[ETH_ALEN];
374 u8 mac_addr[ETH_ALEN]; 392 u8 mac_addr[ETH_ALEN];
375 u8 bss_type; 393 u8 bss_type;
394 u8 set_bss_type;
376 u8 ssid[IW_ESSID_MAX_SIZE + 1]; 395 u8 ssid[IW_ESSID_MAX_SIZE + 1];
377 u8 ssid_len; 396 u8 ssid_len;
378 int channel; 397 int channel;
@@ -382,13 +401,13 @@ struct wl1271 {
382 /* Accounting for allocated / available TX blocks on HW */ 401 /* Accounting for allocated / available TX blocks on HW */
383 u32 tx_blocks_freed[NUM_TX_QUEUES]; 402 u32 tx_blocks_freed[NUM_TX_QUEUES];
384 u32 tx_blocks_available; 403 u32 tx_blocks_available;
385 u8 tx_results_count; 404 u32 tx_results_count;
386 405
387 /* Transmitted TX packets counter for chipset interface */ 406 /* Transmitted TX packets counter for chipset interface */
388 int tx_packets_count; 407 u32 tx_packets_count;
389 408
390 /* Time-offset between host and chipset clocks */ 409 /* Time-offset between host and chipset clocks */
391 int time_offset; 410 s64 time_offset;
392 411
393 /* Session counter for the chipset */ 412 /* Session counter for the chipset */
394 int session_counter; 413 int session_counter;
@@ -403,8 +422,7 @@ struct wl1271 {
403 422
404 /* Security sequence number counters */ 423 /* Security sequence number counters */
405 u8 tx_security_last_seq; 424 u8 tx_security_last_seq;
406 u16 tx_security_seq_16; 425 s64 tx_security_seq;
407 u32 tx_security_seq_32;
408 426
409 /* FW Rx counter */ 427 /* FW Rx counter */
410 u32 rx_counter; 428 u32 rx_counter;
@@ -430,14 +448,19 @@ struct wl1271 {
430 /* currently configured rate set */ 448 /* currently configured rate set */
431 u32 sta_rate_set; 449 u32 sta_rate_set;
432 u32 basic_rate_set; 450 u32 basic_rate_set;
451 u32 basic_rate;
433 u32 rate_set; 452 u32 rate_set;
434 453
435 /* The current band */ 454 /* The current band */
436 enum ieee80211_band band; 455 enum ieee80211_band band;
437 456
457 /* Beaconing interval (needed for ad-hoc) */
458 u32 beacon_int;
459
438 /* Default key (for WEP) */ 460 /* Default key (for WEP) */
439 u32 default_key; 461 u32 default_key;
440 462
463 unsigned int filters;
441 unsigned int rx_config; 464 unsigned int rx_config;
442 unsigned int rx_filter; 465 unsigned int rx_filter;
443 466
@@ -450,6 +473,9 @@ struct wl1271 {
450 /* in dBm */ 473 /* in dBm */
451 int power_level; 474 int power_level;
452 475
476 int rssi_thold;
477 int last_rssi_event;
478
453 struct wl1271_stats stats; 479 struct wl1271_stats stats;
454 struct wl1271_debugfs debugfs; 480 struct wl1271_debugfs debugfs;
455 481
@@ -465,6 +491,8 @@ struct wl1271 {
465 /* Current chipset configuration */ 491 /* Current chipset configuration */
466 struct conf_drv_settings conf; 492 struct conf_drv_settings conf;
467 493
494 bool sg_enabled;
495
468 struct list_head list; 496 struct list_head list;
469}; 497};
470 498
@@ -477,7 +505,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
477 505
478#define WL1271_DEFAULT_POWER_LEVEL 0 506#define WL1271_DEFAULT_POWER_LEVEL 0
479 507
480#define WL1271_TX_QUEUE_MAX_LENGTH 20 508#define WL1271_TX_QUEUE_LOW_WATERMARK 10
509#define WL1271_TX_QUEUE_HIGH_WATERMARK 25
481 510
482/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power 511/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
483 on in case it has been shut down shortly before */ 512 on in case it has been shut down shortly before */
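
Replacing the single queue limit with two watermarks gives the TX path hysteresis: mac80211 is throttled once the driver queue crosses the high mark and resumed only after it drains below the low mark, avoiding stop/wake thrashing around a single threshold. A sketch, assuming the driver keeps pending frames in an skb queue (the two ieee80211_* calls are standard mac80211 API):

static void wl12xx_tx_flow_control(struct wl1271 *wl)
{
	int len = skb_queue_len(&wl->tx_queue);	/* assumed field */

	if (len >= WL1271_TX_QUEUE_HIGH_WATERMARK)
		ieee80211_stop_queues(wl->hw);	/* throttle mac80211 */
	else if (len <= WL1271_TX_QUEUE_LOW_WATERMARK)
		ieee80211_wake_queues(wl->hw);	/* safe to resume */
}
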
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 308782421fce..2ad086efe06e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -32,7 +32,6 @@
32#include "wl1271.h" 32#include "wl1271.h"
33#include "wl12xx_80211.h" 33#include "wl12xx_80211.h"
34#include "wl1271_reg.h" 34#include "wl1271_reg.h"
35#include "wl1271_spi.h"
36#include "wl1271_ps.h" 35#include "wl1271_ps.h"
37 36
38int wl1271_acx_wake_up_conditions(struct wl1271 *wl) 37int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
@@ -137,12 +136,7 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
137 goto out; 136 goto out;
138 } 137 }
139 138
140 /* 139 acx->current_tx_power = power * 10;
141 * FIXME: This is a workaround needed while we don't have the correct
142 * calibration, to avoid distortions
143 */
144 /* acx->current_tx_power = power * 10; */
145 acx->current_tx_power = 120;
146 140
147 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); 141 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
148 if (ret < 0) { 142 if (ret < 0) {
@@ -511,12 +505,17 @@ out:
511 return ret; 505 return ret;
512} 506}
513 507
514int wl1271_acx_conn_monit_params(struct wl1271 *wl) 508#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
509
510int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable)
515{ 511{
516 struct acx_conn_monit_params *acx; 512 struct acx_conn_monit_params *acx;
513 u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
514 u32 timeout = ACX_CONN_MONIT_DISABLE_VALUE;
517 int ret; 515 int ret;
518 516
519 wl1271_debug(DEBUG_ACX, "acx connection monitor parameters"); 517 wl1271_debug(DEBUG_ACX, "acx connection monitor parameters: %s",
518 enable ? "enabled" : "disabled");
520 519
521 acx = kzalloc(sizeof(*acx), GFP_KERNEL); 520 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
522 if (!acx) { 521 if (!acx) {
@@ -524,8 +523,13 @@ int wl1271_acx_conn_monit_params(struct wl1271 *wl)
524 goto out; 523 goto out;
525 } 524 }
526 525
527 acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold); 526 if (enable) {
528 acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout); 527 threshold = wl->conf.conn.synch_fail_thold;
528 timeout = wl->conf.conn.bss_lose_timeout;
529 }
530
531 acx->synch_fail_thold = cpu_to_le32(threshold);
532 acx->bss_lose_timeout = cpu_to_le32(timeout);
529 533
530 ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS, 534 ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
531 acx, sizeof(*acx)); 535 acx, sizeof(*acx));
@@ -541,7 +545,7 @@ out:
541} 545}
542 546
543 547
544int wl1271_acx_sg_enable(struct wl1271 *wl) 548int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable)
545{ 549{
546 struct acx_bt_wlan_coex *pta; 550 struct acx_bt_wlan_coex *pta;
547 int ret; 551 int ret;
@@ -554,7 +558,10 @@ int wl1271_acx_sg_enable(struct wl1271 *wl)
554 goto out; 558 goto out;
555 } 559 }
556 560
557 pta->enable = SG_ENABLE; 561 if (enable)
562 pta->enable = wl->conf.sg.state;
563 else
564 pta->enable = CONF_SG_DISABLE;
558 565
559 ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta)); 566 ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta));
560 if (ret < 0) { 567 if (ret < 0) {
@@ -571,7 +578,7 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
571{ 578{
572 struct acx_bt_wlan_coex_param *param; 579 struct acx_bt_wlan_coex_param *param;
573 struct conf_sg_settings *c = &wl->conf.sg; 580 struct conf_sg_settings *c = &wl->conf.sg;
574 int ret; 581 int i, ret;
575 582
576 wl1271_debug(DEBUG_ACX, "acx sg cfg"); 583 wl1271_debug(DEBUG_ACX, "acx sg cfg");
577 584
@@ -582,19 +589,9 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
582 } 589 }
583 590
584 /* BT-WLAN coext parameters */ 591 /* BT-WLAN coext parameters */
585 param->per_threshold = cpu_to_le32(c->per_threshold); 592 for (i = 0; i < CONF_SG_PARAMS_MAX; i++)
586 param->max_scan_compensation_time = 593 param->params[i] = c->params[i];
587 cpu_to_le32(c->max_scan_compensation_time); 594 param->param_idx = CONF_SG_PARAMS_ALL;
588 param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval);
589 param->load_ratio = c->load_ratio;
590 param->auto_ps_mode = c->auto_ps_mode;
591 param->probe_req_compensation = c->probe_req_compensation;
592 param->scan_window_compensation = c->scan_window_compensation;
593 param->antenna_config = c->antenna_config;
594 param->beacon_miss_threshold = c->beacon_miss_threshold;
595 param->rate_adaptation_threshold =
596 cpu_to_le32(c->rate_adaptation_threshold);
597 param->rate_adaptation_snr = c->rate_adaptation_snr;
598 595
599 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); 596 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
600 if (ret < 0) { 597 if (ret < 0) {
@@ -806,7 +803,7 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
806 803
807 /* configure one basic rate class */ 804 /* configure one basic rate class */
808 idx = ACX_TX_BASIC_RATE; 805 idx = ACX_TX_BASIC_RATE;
809 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set); 806 acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate);
810 acx->rate_class[idx].short_retry_limit = c->short_retry_limit; 807 acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
811 acx->rate_class[idx].long_retry_limit = c->long_retry_limit; 808 acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
812 acx->rate_class[idx].aflags = c->aflags; 809 acx->rate_class[idx].aflags = c->aflags;
@@ -1143,3 +1140,129 @@ out:
1143 kfree(acx); 1140 kfree(acx);
1144 return ret; 1141 return ret;
1145} 1142}
1143
1144int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable)
1145{
1146 struct wl1271_acx_keep_alive_mode *acx = NULL;
1147 int ret = 0;
1148
1149 wl1271_debug(DEBUG_ACX, "acx keep alive mode: %d", enable);
1150
1151 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1152 if (!acx) {
1153 ret = -ENOMEM;
1154 goto out;
1155 }
1156
1157 acx->enabled = enable;
1158
1159 ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
1160 if (ret < 0) {
1161 wl1271_warning("acx keep alive mode failed: %d", ret);
1162 goto out;
1163 }
1164
1165out:
1166 kfree(acx);
1167 return ret;
1168}
1169
1170int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid)
1171{
1172 struct wl1271_acx_keep_alive_config *acx = NULL;
1173 int ret = 0;
1174
1175 wl1271_debug(DEBUG_ACX, "acx keep alive config");
1176
1177 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1178 if (!acx) {
1179 ret = -ENOMEM;
1180 goto out;
1181 }
1182
1183 acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
1184 acx->index = index;
1185 acx->tpl_validation = tpl_valid;
1186 acx->trigger = ACX_KEEP_ALIVE_NO_TX;
1187
1188 ret = wl1271_cmd_configure(wl, ACX_SET_KEEP_ALIVE_CONFIG,
1189 acx, sizeof(*acx));
1190 if (ret < 0) {
1191 wl1271_warning("acx keep alive config failed: %d", ret);
1192 goto out;
1193 }
1194
1195out:
1196 kfree(acx);
1197 return ret;
1198}
1199
1200int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
1201 s16 thold, u8 hyst)
1202{
1203 struct wl1271_acx_rssi_snr_trigger *acx = NULL;
1204 int ret = 0;
1205
1206 wl1271_debug(DEBUG_ACX, "acx rssi snr trigger");
1207
1208 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1209 if (!acx) {
1210 ret = -ENOMEM;
1211 goto out;
1212 }
1213
1214 wl->last_rssi_event = -1;
1215
1216 acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
1217 acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
1218 acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
1219 if (enable)
1220 acx->enable = WL1271_ACX_TRIG_ENABLE;
1221 else
1222 acx->enable = WL1271_ACX_TRIG_DISABLE;
1223
1224 acx->index = WL1271_ACX_TRIG_IDX_RSSI;
1225 acx->dir = WL1271_ACX_TRIG_DIR_BIDIR;
1226 acx->threshold = cpu_to_le16(thold);
1227 acx->hysteresis = hyst;
1228
1229 ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_TRIGGER, acx, sizeof(*acx));
1230 if (ret < 0) {
1231 wl1271_warning("acx rssi snr trigger setting failed: %d", ret);
1232 goto out;
1233 }
1234
1235out:
1236 kfree(acx);
1237 return ret;
1238}
1239
1240int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl)
1241{
1242 struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
1243 struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
1244 int ret = 0;
1245
1246 wl1271_debug(DEBUG_ACX, "acx rssi snr avg weights");
1247
1248 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1249 if (!acx) {
1250 ret = -ENOMEM;
1251 goto out;
1252 }
1253
1254 acx->rssi_beacon = c->avg_weight_rssi_beacon;
1255 acx->rssi_data = c->avg_weight_rssi_data;
1256 acx->snr_beacon = c->avg_weight_snr_beacon;
1257 acx->snr_data = c->avg_weight_snr_data;
1258
1259 ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_WEIGHTS, acx, sizeof(*acx));
1260 if (ret < 0) {
1261 wl1271_warning("acx rssi snr trigger weights failed: %d", ret);
1262 goto out;
1263 }
1264
1265out:
1266 kfree(acx);
1267 return ret;
1268}
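
Together with the new rssi_thold/last_rssi_event fields, the trigger and weights helpers let the driver service mac80211's connection quality monitoring. A sketch of the plumbing from a bss_info_changed() handler; BSS_CHANGED_CQM and the cqm_* fields are standard mac80211, the function itself is illustrative:

static void wl12xx_config_cqm(struct wl1271 *wl,
			      struct ieee80211_bss_conf *bss_conf,
			      u32 changed)
{
	int ret;

	if (!(changed & BSS_CHANGED_CQM))
		return;

	ret = wl1271_acx_rssi_snr_trigger(wl, true,
					  bss_conf->cqm_rssi_thold,
					  bss_conf->cqm_rssi_hyst);
	if (ret < 0)
		return;

	wl->rssi_thold = bss_conf->cqm_rssi_thold;
}
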
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index aeccc98581eb..420e7e2fc021 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -392,81 +392,27 @@ struct acx_conn_monit_params {
392 __le32 bss_lose_timeout; /* number of TU's from synch fail */ 392 __le32 bss_lose_timeout; /* number of TU's from synch fail */
393} __attribute__ ((packed)); 393} __attribute__ ((packed));
394 394
395enum {
396 SG_ENABLE = 0,
397 SG_DISABLE,
398 SG_SENSE_NO_ACTIVITY,
399 SG_SENSE_ACTIVE
400};
401
402struct acx_bt_wlan_coex { 395struct acx_bt_wlan_coex {
403 struct acx_header header; 396 struct acx_header header;
404 397
405 /*
406 * 0 -> PTA enabled
407 * 1 -> PTA disabled
408 * 2 -> sense no active mode, i.e.
409 * an interrupt is sent upon
410 * BT activity.
411 * 3 -> PTA is switched on in response
412 * to the interrupt sending.
413 */
414 u8 enable; 398 u8 enable;
415 u8 pad[3]; 399 u8 pad[3];
416} __attribute__ ((packed)); 400} __attribute__ ((packed));
417 401
418struct acx_dco_itrim_params { 402struct acx_bt_wlan_coex_param {
419 struct acx_header header; 403 struct acx_header header;
420 404
421 u8 enable; 405 __le32 params[CONF_SG_PARAMS_MAX];
406 u8 param_idx;
422 u8 padding[3]; 407 u8 padding[3];
423 __le32 timeout;
424} __attribute__ ((packed)); 408} __attribute__ ((packed));
425 409
426#define PTA_ANTENNA_TYPE_DEF (0) 410struct acx_dco_itrim_params {
427#define PTA_BT_HP_MAXTIME_DEF (2000)
428#define PTA_WLAN_HP_MAX_TIME_DEF (5000)
429#define PTA_SENSE_DISABLE_TIMER_DEF (1350)
430#define PTA_PROTECTIVE_RX_TIME_DEF (1500)
431#define PTA_PROTECTIVE_TX_TIME_DEF (1500)
432#define PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF (3000)
433#define PTA_SIGNALING_TYPE_DEF (1)
434#define PTA_AFH_LEVERAGE_ON_DEF (0)
435#define PTA_NUMBER_QUIET_CYCLE_DEF (0)
436#define PTA_MAX_NUM_CTS_DEF (3)
437#define PTA_NUMBER_OF_WLAN_PACKETS_DEF (2)
438#define PTA_NUMBER_OF_BT_PACKETS_DEF (2)
439#define PTA_PROTECTIVE_RX_TIME_FAST_DEF (1500)
440#define PTA_PROTECTIVE_TX_TIME_FAST_DEF (3000)
441#define PTA_CYCLE_TIME_FAST_DEF (8700)
442#define PTA_RX_FOR_AVALANCHE_DEF (5)
443#define PTA_ELP_HP_DEF (0)
444#define PTA_ANTI_STARVE_PERIOD_DEF (500)
445#define PTA_ANTI_STARVE_NUM_CYCLE_DEF (4)
446#define PTA_ALLOW_PA_SD_DEF (1)
447#define PTA_TIME_BEFORE_BEACON_DEF (6300)
448#define PTA_HPDM_MAX_TIME_DEF (1600)
449#define PTA_TIME_OUT_NEXT_WLAN_DEF (2550)
450#define PTA_AUTO_MODE_NO_CTS_DEF (0)
451#define PTA_BT_HP_RESPECTED_DEF (3)
452#define PTA_WLAN_RX_MIN_RATE_DEF (24)
453#define PTA_ACK_MODE_DEF (1)
454
455struct acx_bt_wlan_coex_param {
456 struct acx_header header; 411 struct acx_header header;
457 412
458 __le32 per_threshold; 413 u8 enable;
459 __le32 max_scan_compensation_time;
460 __le16 nfs_sample_interval;
461 u8 load_ratio;
462 u8 auto_ps_mode;
463 u8 probe_req_compensation;
464 u8 scan_window_compensation;
465 u8 antenna_config;
466 u8 beacon_miss_threshold;
467 __le32 rate_adaptation_threshold;
468 s8 rate_adaptation_snr;
469 u8 padding[3]; 414 u8 padding[3];
415 __le32 timeout;
470} __attribute__ ((packed)); 416} __attribute__ ((packed));
471 417
472struct acx_energy_detection { 418struct acx_energy_detection {
@@ -969,6 +915,84 @@ struct wl1271_acx_pm_config {
969 u8 padding[3]; 915 u8 padding[3];
970} __attribute__ ((packed)); 916} __attribute__ ((packed));
971 917
918struct wl1271_acx_keep_alive_mode {
919 struct acx_header header;
920
921 u8 enabled;
922 u8 padding[3];
923} __attribute__ ((packed));
924
925enum {
926 ACX_KEEP_ALIVE_NO_TX = 0,
927 ACX_KEEP_ALIVE_PERIOD_ONLY
928};
929
930enum {
931 ACX_KEEP_ALIVE_TPL_INVALID = 0,
932 ACX_KEEP_ALIVE_TPL_VALID
933};
934
935struct wl1271_acx_keep_alive_config {
936 struct acx_header header;
937
938 __le32 period;
939 u8 index;
940 u8 tpl_validation;
941 u8 trigger;
942 u8 padding;
943} __attribute__ ((packed));
944
945enum {
946 WL1271_ACX_TRIG_TYPE_LEVEL = 0,
947 WL1271_ACX_TRIG_TYPE_EDGE,
948};
949
950enum {
951 WL1271_ACX_TRIG_DIR_LOW = 0,
952 WL1271_ACX_TRIG_DIR_HIGH,
953 WL1271_ACX_TRIG_DIR_BIDIR,
954};
955
956enum {
957 WL1271_ACX_TRIG_ENABLE = 1,
958 WL1271_ACX_TRIG_DISABLE,
959};
960
961enum {
962 WL1271_ACX_TRIG_METRIC_RSSI_BEACON = 0,
963 WL1271_ACX_TRIG_METRIC_RSSI_DATA,
964 WL1271_ACX_TRIG_METRIC_SNR_BEACON,
965 WL1271_ACX_TRIG_METRIC_SNR_DATA,
966};
967
968enum {
969 WL1271_ACX_TRIG_IDX_RSSI = 0,
970 WL1271_ACX_TRIG_COUNT = 8,
971};
972
973struct wl1271_acx_rssi_snr_trigger {
974 struct acx_header header;
975
976 __le16 threshold;
977 __le16 pacing; /* 0 - 60000 ms */
978 u8 metric;
979 u8 type;
980 u8 dir;
981 u8 hysteresis;
982 u8 index;
983 u8 enable;
984 u8 padding[2];
985};
986
987struct wl1271_acx_rssi_snr_avg_weights {
988 struct acx_header header;
989
990 u8 rssi_beacon;
991 u8 rssi_data;
992 u8 snr_beacon;
993 u8 snr_data;
994};
995
972enum { 996enum {
973 ACX_WAKE_UP_CONDITIONS = 0x0002, 997 ACX_WAKE_UP_CONDITIONS = 0x0002,
974 ACX_MEM_CFG = 0x0003, 998 ACX_MEM_CFG = 0x0003,
@@ -1017,8 +1041,8 @@ enum {
1017 ACX_FRAG_CFG = 0x004F, 1041 ACX_FRAG_CFG = 0x004F,
1018 ACX_BET_ENABLE = 0x0050, 1042 ACX_BET_ENABLE = 0x0050,
1019 ACX_RSSI_SNR_TRIGGER = 0x0051, 1043 ACX_RSSI_SNR_TRIGGER = 0x0051,
1020 ACX_RSSI_SNR_WEIGHTS = 0x0051, 1044 ACX_RSSI_SNR_WEIGHTS = 0x0052,
1021 ACX_KEEP_ALIVE_MODE = 0x0052, 1045 ACX_KEEP_ALIVE_MODE = 0x0053,
1022 ACX_SET_KEEP_ALIVE_CONFIG = 0x0054, 1046 ACX_SET_KEEP_ALIVE_CONFIG = 0x0054,
1023 ACX_BA_SESSION_RESPONDER_POLICY = 0x0055, 1047 ACX_BA_SESSION_RESPONDER_POLICY = 0x0055,
1024 ACX_BA_SESSION_INITIATOR_POLICY = 0x0056, 1048 ACX_BA_SESSION_INITIATOR_POLICY = 0x0056,
@@ -1058,8 +1082,8 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
1058int wl1271_acx_dco_itrim_params(struct wl1271 *wl); 1082int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter); 1083int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1084int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
1061int wl1271_acx_conn_monit_params(struct wl1271 *wl); 1085int wl1271_acx_conn_monit_params(struct wl1271 *wl, bool enable);
1062int wl1271_acx_sg_enable(struct wl1271 *wl); 1086int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable);
1063int wl1271_acx_sg_cfg(struct wl1271 *wl); 1087int wl1271_acx_sg_cfg(struct wl1271 *wl);
1064int wl1271_acx_cca_threshold(struct wl1271 *wl); 1088int wl1271_acx_cca_threshold(struct wl1271 *wl);
1065int wl1271_acx_bcn_dtim_options(struct wl1271 *wl); 1089int wl1271_acx_bcn_dtim_options(struct wl1271 *wl);
@@ -1085,5 +1109,10 @@ int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
1085int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address, 1109int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1086 u8 version); 1110 u8 version);
1087int wl1271_acx_pm_config(struct wl1271 *wl); 1111int wl1271_acx_pm_config(struct wl1271 *wl);
1112int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
1113int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
1114int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
1115 s16 thold, u8 hyst);
1116int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
1088 1117
1089#endif /* __WL1271_ACX_H__ */ 1118#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 024356263065..8087dc17f29d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This file is part of wl1271 2 * This file is part of wl1271
3 * 3 *
4 * Copyright (C) 2008-2009 Nokia Corporation 4 * Copyright (C) 2008-2010 Nokia Corporation
5 * 5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com> 6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 * 7 *
@@ -27,7 +27,6 @@
27#include "wl1271_acx.h" 27#include "wl1271_acx.h"
28#include "wl1271_reg.h" 28#include "wl1271_reg.h"
29#include "wl1271_boot.h" 29#include "wl1271_boot.h"
30#include "wl1271_spi.h"
31#include "wl1271_io.h" 30#include "wl1271_io.h"
32#include "wl1271_event.h" 31#include "wl1271_event.h"
33 32
@@ -230,6 +229,14 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
230 nvs_len = sizeof(wl->nvs->nvs); 229 nvs_len = sizeof(wl->nvs->nvs);
231 nvs_ptr = (u8 *)wl->nvs->nvs; 230 nvs_ptr = (u8 *)wl->nvs->nvs;
232 231
232 /* update current MAC address to NVS */
233 nvs_ptr[11] = wl->mac_addr[0];
234 nvs_ptr[10] = wl->mac_addr[1];
235 nvs_ptr[6] = wl->mac_addr[2];
236 nvs_ptr[5] = wl->mac_addr[3];
237 nvs_ptr[4] = wl->mac_addr[4];
238 nvs_ptr[3] = wl->mac_addr[5];
239
233 /* 240 /*
234 * Layout before the actual NVS tables: 241 * Layout before the actual NVS tables:
235 * 1 byte : burst length. 242 * 1 byte : burst length.
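
The scattered NVS offsets above are easier to audit as a table: the MAC address goes in byte-reversed order, split across two regions of the NVS header. The offsets below are taken from the hunk; the helper itself is illustrative:

/* nvs_ptr[off[i]] = mac_addr[i], offsets as in the hunk above */
static const u8 wl12xx_nvs_mac_off[ETH_ALEN] = { 11, 10, 6, 5, 4, 3 };

static void wl12xx_nvs_set_mac(u8 *nvs_ptr, const u8 *mac)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		nvs_ptr[wl12xx_nvs_mac_off[i]] = mac[i];
}
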
@@ -300,7 +307,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
300 307
301static void wl1271_boot_enable_interrupts(struct wl1271 *wl) 308static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
302{ 309{
303 enable_irq(wl->irq); 310 wl1271_enable_interrupts(wl);
304 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, 311 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
305 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 312 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
306 wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL); 313 wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
@@ -404,7 +411,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
404 /* unmask required mbox events */ 411 /* unmask required mbox events */
405 wl->event_mask = BSS_LOSE_EVENT_ID | 412 wl->event_mask = BSS_LOSE_EVENT_ID |
406 SCAN_COMPLETE_EVENT_ID | 413 SCAN_COMPLETE_EVENT_ID |
407 PS_REPORT_EVENT_ID; 414 PS_REPORT_EVENT_ID |
415 JOIN_EVENT_COMPLETE_ID |
416 DISCONNECT_EVENT_COMPLETE_ID |
417 RSSI_SNR_TRIGGER_0_EVENT_ID;
408 418
409 ret = wl1271_event_unmask(wl); 419 ret = wl1271_event_unmask(wl);
410 if (ret < 0) { 420 if (ret < 0) {
@@ -445,11 +455,15 @@ int wl1271_boot(struct wl1271 *wl)
445 455
446 if (REF_CLOCK != 0) { 456 if (REF_CLOCK != 0) {
447 u16 val; 457 u16 val;
448 /* Set clock type */ 458 /* Set clock type (open drain) */
449 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE); 459 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
450 val &= FREF_CLK_TYPE_BITS; 460 val &= FREF_CLK_TYPE_BITS;
451 val |= CLK_REQ_PRCM;
452 wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val); 461 wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
462
463 /* Set clock pull mode (no pull) */
464 val = wl1271_top_reg_read(wl, OCP_REG_CLK_PULL);
465 val |= NO_PULL;
466 wl1271_top_reg_write(wl, OCP_REG_CLK_PULL, val);
453 } else { 467 } else {
454 u16 val; 468 u16 val;
455 /* Set clock polarity */ 469 /* Set clock polarity */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index 412443ee655a..95ecc5241959 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -53,10 +53,13 @@ struct wl1271_static_data {
53#define OCP_REG_POLARITY 0x0064 53#define OCP_REG_POLARITY 0x0064
54#define OCP_REG_CLK_TYPE 0x0448 54#define OCP_REG_CLK_TYPE 0x0448
55#define OCP_REG_CLK_POLARITY 0x0cb2 55#define OCP_REG_CLK_POLARITY 0x0cb2
56#define OCP_REG_CLK_PULL 0x0cb4
56 57
57#define CMD_MBOX_ADDRESS 0x407B4
58 58
59#define POLARITY_LOW BIT(1) 59#define CMD_MBOX_ADDRESS 0x407B4
60
61#define POLARITY_LOW BIT(1)
62#define NO_PULL (BIT(14) | BIT(15))
60 63
61#define FREF_CLK_TYPE_BITS 0xfffffe7f 64#define FREF_CLK_TYPE_BITS 0xfffffe7f
62#define CLK_REQ_PRCM 0x100 65#define CLK_REQ_PRCM 0x100
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index e7832f3318eb..6b5ba8ec94c9 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * This file is part of wl1271 2 * This file is part of wl1271
3 * 3 *
4 * Copyright (C) 2009 Nokia Corporation 4 * Copyright (C) 2009-2010 Nokia Corporation
5 * 5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com> 6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 * 7 *
@@ -26,15 +26,18 @@
26#include <linux/crc7.h> 26#include <linux/crc7.h>
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28#include <linux/etherdevice.h> 28#include <linux/etherdevice.h>
29#include <linux/ieee80211.h>
29#include <linux/slab.h> 30#include <linux/slab.h>
30 31
31#include "wl1271.h" 32#include "wl1271.h"
32#include "wl1271_reg.h" 33#include "wl1271_reg.h"
33#include "wl1271_spi.h"
34#include "wl1271_io.h" 34#include "wl1271_io.h"
35#include "wl1271_acx.h" 35#include "wl1271_acx.h"
36#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
37#include "wl1271_cmd.h" 37#include "wl1271_cmd.h"
38#include "wl1271_event.h"
39
40#define WL1271_CMD_POLL_COUNT 5
38 41
39/* 42/*
40 * send command to firmware 43 * send command to firmware
@@ -52,6 +55,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
52 u32 intr; 55 u32 intr;
53 int ret = 0; 56 int ret = 0;
54 u16 status; 57 u16 status;
58 u16 poll_count = 0;
55 59
56 cmd = buf; 60 cmd = buf;
57 cmd->id = cpu_to_le16(id); 61 cmd->id = cpu_to_le16(id);
@@ -73,7 +77,11 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
73 goto out; 77 goto out;
74 } 78 }
75 79
76 msleep(1); 80 udelay(10);
81 poll_count++;
82 if (poll_count == WL1271_CMD_POLL_COUNT)
83 wl1271_info("cmd polling took over %d cycles",
84 poll_count);
77 85
78 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 86 intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
79 } 87 }
@@ -249,7 +257,36 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
249 return ret; 257 return ret;
250} 258}
251 259
252int wl1271_cmd_join(struct wl1271 *wl) 260/*
261 * Poll the mailbox event field until any of the bits in the mask is set or a
262 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
263 */
264static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
265{
266 u32 events_vector, event;
267 unsigned long timeout;
268
269 timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
270
271 do {
272 if (time_after(jiffies, timeout))
273 return -ETIMEDOUT;
274
275 msleep(1);
276
277 /* read from both event fields */
278 wl1271_read(wl, wl->mbox_ptr[0], &events_vector,
279 sizeof(events_vector), false);
280 event = events_vector & mask;
281 wl1271_read(wl, wl->mbox_ptr[1], &events_vector,
282 sizeof(events_vector), false);
283 event |= events_vector & mask;
284 } while (!event);
285
286 return 0;
287}
288
289int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type)
253{ 290{
254 static bool do_cal = true; 291 static bool do_cal = true;
255 struct wl1271_cmd_join *join; 292 struct wl1271_cmd_join *join;
@@ -280,30 +317,13 @@ int wl1271_cmd_join(struct wl1271 *wl)
280 317
281 join->rx_config_options = cpu_to_le32(wl->rx_config); 318 join->rx_config_options = cpu_to_le32(wl->rx_config);
282 join->rx_filter_options = cpu_to_le32(wl->rx_filter); 319 join->rx_filter_options = cpu_to_le32(wl->rx_filter);
283 join->bss_type = wl->bss_type; 320 join->bss_type = bss_type;
321 join->basic_rate_set = wl->basic_rate_set;
284 322
285 /* 323 if (wl->band == IEEE80211_BAND_5GHZ)
286 * FIXME: disable temporarily all filters because after commit
287 * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
288 * association. The filter logic needs to be implemented properly
289 * and once that is done, this hack can be removed.
290 */
291 join->rx_config_options = cpu_to_le32(0);
292 join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER);
293
294 if (wl->band == IEEE80211_BAND_2GHZ)
295 join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS |
296 CONF_HW_BIT_RATE_2MBPS |
297 CONF_HW_BIT_RATE_5_5MBPS |
298 CONF_HW_BIT_RATE_11MBPS);
299 else {
300 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ; 324 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
301 join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS |
302 CONF_HW_BIT_RATE_12MBPS |
303 CONF_HW_BIT_RATE_24MBPS);
304 }
305 325
306 join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT); 326 join->beacon_interval = cpu_to_le16(wl->beacon_int);
307 join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD; 327 join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
308 328
309 join->channel = wl->channel; 329 join->channel = wl->channel;
@@ -320,8 +340,7 @@ int wl1271_cmd_join(struct wl1271 *wl)
320 340
321 /* reset TX security counters */ 341 /* reset TX security counters */
322 wl->tx_security_last_seq = 0; 342 wl->tx_security_last_seq = 0;
323 wl->tx_security_seq_16 = 0; 343 wl->tx_security_seq = 0;
324 wl->tx_security_seq_32 = 0;
325 344
326 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0); 345 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
327 if (ret < 0) { 346 if (ret < 0) {
@@ -329,11 +348,9 @@ int wl1271_cmd_join(struct wl1271 *wl)
329 goto out_free; 348 goto out_free;
330 } 349 }
331 350
332 /* 351 ret = wl1271_cmd_wait_for_event(wl, JOIN_EVENT_COMPLETE_ID);
333 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to 352 if (ret < 0)
334 * simplify locking we just sleep instead, for now 353 wl1271_error("cmd join event completion error");
335 */
336 msleep(10);
337 354
338out_free: 355out_free:
339 kfree(join); 356 kfree(join);
@@ -465,7 +482,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
465 if (ret < 0) { 482 if (ret < 0) {
466 wl1271_error("tx %s cmd for channel %d failed", 483 wl1271_error("tx %s cmd for channel %d failed",
467 enable ? "start" : "stop", cmd->channel); 484 enable ? "start" : "stop", cmd->channel);
468 return ret; 485 goto out;
469 } 486 }
470 487
471 wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d", 488 wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
@@ -549,25 +566,29 @@ out:
549 return ret; 566 return ret;
550} 567}
551 568
552int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 569int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
553 u8 active_scan, u8 high_prio, u8 band, 570 const u8 *ie, size_t ie_len, u8 active_scan,
554 u8 probe_requests) 571 u8 high_prio, u8 band, u8 probe_requests)
555{ 572{
556 573
557 struct wl1271_cmd_trigger_scan_to *trigger = NULL; 574 struct wl1271_cmd_trigger_scan_to *trigger = NULL;
558 struct wl1271_cmd_scan *params = NULL; 575 struct wl1271_cmd_scan *params = NULL;
559 struct ieee80211_channel *channels; 576 struct ieee80211_channel *channels;
577 u32 rate;
560 int i, j, n_ch, ret; 578 int i, j, n_ch, ret;
561 u16 scan_options = 0; 579 u16 scan_options = 0;
562 u8 ieee_band; 580 u8 ieee_band;
563 581
564 if (band == WL1271_SCAN_BAND_2_4_GHZ) 582 if (band == WL1271_SCAN_BAND_2_4_GHZ) {
565 ieee_band = IEEE80211_BAND_2GHZ; 583 ieee_band = IEEE80211_BAND_2GHZ;
566 else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled()) 584 rate = wl->conf.tx.basic_rate;
585 } else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled()) {
567 ieee_band = IEEE80211_BAND_2GHZ; 586 ieee_band = IEEE80211_BAND_2GHZ;
568 else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled()) 587 rate = wl->conf.tx.basic_rate;
588 } else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled()) {
569 ieee_band = IEEE80211_BAND_5GHZ; 589 ieee_band = IEEE80211_BAND_5GHZ;
570 else 590 rate = wl->conf.tx.basic_rate_5;
591 } else
571 return -EINVAL; 592 return -EINVAL;
572 593
573 if (wl->hw->wiphy->bands[ieee_band]->channels == NULL) 594 if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
@@ -594,8 +615,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
594 params->params.scan_options = cpu_to_le16(scan_options); 615 params->params.scan_options = cpu_to_le16(scan_options);
595 616
596 params->params.num_probe_requests = probe_requests; 617 params->params.num_probe_requests = probe_requests;
597 /* Let the fw autodetect suitable tx_rate for probes */ 618 params->params.tx_rate = rate;
598 params->params.tx_rate = 0;
599 params->params.tid_trigger = 0; 619 params->params.tid_trigger = 0;
600 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; 620 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
601 621
@@ -622,12 +642,13 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
622 642
623 params->params.num_channels = j; 643 params->params.num_channels = j;
624 644
625 if (len && ssid) { 645 if (ssid_len && ssid) {
626 params->params.ssid_len = len; 646 params->params.ssid_len = ssid_len;
627 memcpy(params->params.ssid, ssid, len); 647 memcpy(params->params.ssid, ssid, ssid_len);
628 } 648 }
629 649
630 ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band); 650 ret = wl1271_cmd_build_probe_req(wl, ssid, ssid_len,
651 ie, ie_len, ieee_band);
631 if (ret < 0) { 652 if (ret < 0) {
632 wl1271_error("PROBE request template failed"); 653 wl1271_error("PROBE request template failed");
633 goto out; 654 goto out;
@@ -658,9 +679,9 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
658 wl->scan.active = active_scan; 679 wl->scan.active = active_scan;
659 wl->scan.high_prio = high_prio; 680 wl->scan.high_prio = high_prio;
660 wl->scan.probe_requests = probe_requests; 681 wl->scan.probe_requests = probe_requests;
661 if (len && ssid) { 682 if (ssid_len && ssid) {
662 wl->scan.ssid_len = len; 683 wl->scan.ssid_len = ssid_len;
663 memcpy(wl->scan.ssid, ssid, len); 684 memcpy(wl->scan.ssid, ssid, ssid_len);
664 } else 685 } else
665 wl->scan.ssid_len = 0; 686 wl->scan.ssid_len = 0;
666 } 687 }
@@ -675,11 +696,12 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
675 696
676out: 697out:
677 kfree(params); 698 kfree(params);
699 kfree(trigger);
678 return ret; 700 return ret;
679} 701}
680 702
681int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 703int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
682 void *buf, size_t buf_len) 704 void *buf, size_t buf_len, int index, u32 rates)
683{ 705{
684 struct wl1271_cmd_template_set *cmd; 706 struct wl1271_cmd_template_set *cmd;
685 int ret = 0; 707 int ret = 0;
@@ -697,9 +719,10 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
697 719
698 cmd->len = cpu_to_le16(buf_len); 720 cmd->len = cpu_to_le16(buf_len);
699 cmd->template_type = template_id; 721 cmd->template_type = template_id;
700 cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates); 722 cmd->enabled_rates = cpu_to_le32(rates);
701 cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit; 723 cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
702 cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit; 724 cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
725 cmd->index = index;
703 726
704 if (buf) 727 if (buf)
705 memcpy(cmd->template_data, buf, buf_len); 728 memcpy(cmd->template_data, buf, buf_len);
@@ -717,155 +740,129 @@ out:
717 return ret; 740 return ret;
718} 741}
719 742
720static int wl1271_build_basic_rates(u8 *rates, u8 band) 743int wl1271_cmd_build_null_data(struct wl1271 *wl)
721{ 744{
722 u8 index = 0; 745 struct sk_buff *skb = NULL;
723 746 int size;
724 if (band == IEEE80211_BAND_2GHZ) { 747 void *ptr;
725 rates[index++] = 748 int ret = -ENOMEM;
726 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
727 rates[index++] =
728 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
729 rates[index++] =
730 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
731 rates[index++] =
732 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
733 } else if (band == IEEE80211_BAND_5GHZ) {
734 rates[index++] =
735 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
736 rates[index++] =
737 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
738 rates[index++] =
739 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
740 } else {
741 wl1271_error("build_basic_rates invalid band: %d", band);
742 }
743 749
744 return index;
745}
746 750
747static int wl1271_build_extended_rates(u8 *rates, u8 band) 751 if (wl->bss_type == BSS_TYPE_IBSS) {
748{ 752 size = sizeof(struct wl12xx_null_data_template);
749 u8 index = 0; 753 ptr = NULL;
750
751 if (band == IEEE80211_BAND_2GHZ) {
752 rates[index++] = IEEE80211_OFDM_RATE_6MB;
753 rates[index++] = IEEE80211_OFDM_RATE_9MB;
754 rates[index++] = IEEE80211_OFDM_RATE_12MB;
755 rates[index++] = IEEE80211_OFDM_RATE_18MB;
756 rates[index++] = IEEE80211_OFDM_RATE_24MB;
757 rates[index++] = IEEE80211_OFDM_RATE_36MB;
758 rates[index++] = IEEE80211_OFDM_RATE_48MB;
759 rates[index++] = IEEE80211_OFDM_RATE_54MB;
760 } else if (band == IEEE80211_BAND_5GHZ) {
761 rates[index++] =
762 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
763 rates[index++] =
764 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
765 rates[index++] =
766 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
767 rates[index++] =
768 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
769 rates[index++] =
770 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
771 rates[index++] =
772 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
773 } else { 754 } else {
774 wl1271_error("build_basic_rates invalid band: %d", band); 755 skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
756 if (!skb)
757 goto out;
758 size = skb->len;
759 ptr = skb->data;
775 } 760 }
776 761
777 return index; 762 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, ptr, size, 0,
763 WL1271_RATE_AUTOMATIC);
764
765out:
766 dev_kfree_skb(skb);
767 if (ret)
768 wl1271_warning("cmd buld null data failed %d", ret);
769
770 return ret;
771
778} 772}
779 773
780int wl1271_cmd_build_null_data(struct wl1271 *wl) 774int wl1271_cmd_build_klv_null_data(struct wl1271 *wl)
781{ 775{
782 struct wl12xx_null_data_template template; 776 struct sk_buff *skb = NULL;
777 int ret = -ENOMEM;
783 778
784 if (!is_zero_ether_addr(wl->bssid)) { 779 skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
785 memcpy(template.header.da, wl->bssid, ETH_ALEN); 780 if (!skb)
786 memcpy(template.header.bssid, wl->bssid, ETH_ALEN); 781 goto out;
787 } else { 782
788 memset(template.header.da, 0xff, ETH_ALEN); 783 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV,
789 memset(template.header.bssid, 0xff, ETH_ALEN); 784 skb->data, skb->len,
790 } 785 CMD_TEMPL_KLV_IDX_NULL_DATA,
786 WL1271_RATE_AUTOMATIC);
791 787
792 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); 788out:
793 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | 789 dev_kfree_skb(skb);
794 IEEE80211_STYPE_NULLFUNC | 790 if (ret)
795 IEEE80211_FCTL_TODS); 791 wl1271_warning("cmd build klv null data failed %d", ret);
796 792
797 return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template, 793 return ret;
798 sizeof(template));
799 794
800} 795}
801 796
802int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid) 797int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
803{ 798{
804 struct wl12xx_ps_poll_template template; 799 struct sk_buff *skb;
805 800 int ret = 0;
806 memcpy(template.bssid, wl->bssid, ETH_ALEN);
807 memcpy(template.ta, wl->mac_addr, ETH_ALEN);
808
809 /* aid in PS-Poll has its two MSBs each set to 1 */
810 template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
811 801
812 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); 802 skb = ieee80211_pspoll_get(wl->hw, wl->vif);
803 if (!skb)
804 goto out;
813 805
814 return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template, 806 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, skb->data,
815 sizeof(template)); 807 skb->len, 0, wl->basic_rate);
816 808
809out:
810 dev_kfree_skb(skb);
811 return ret;
817} 812}
818 813
819int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len, 814int wl1271_cmd_build_probe_req(struct wl1271 *wl,
820 u8 band) 815 const u8 *ssid, size_t ssid_len,
816 const u8 *ie, size_t ie_len, u8 band)
821{ 817{
822 struct wl12xx_probe_req_template template; 818 struct sk_buff *skb;
823 struct wl12xx_ie_rates *rates;
824 char *ptr;
825 u16 size;
826 int ret; 819 int ret;
827 820
828 ptr = (char *)&template; 821 skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
829 size = sizeof(struct ieee80211_header); 822 ie, ie_len);
830 823 if (!skb) {
831 memset(template.header.da, 0xff, ETH_ALEN); 824 ret = -ENOMEM;
832 memset(template.header.bssid, 0xff, ETH_ALEN); 825 goto out;
833 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); 826 }
834 template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ); 827
835 828 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", skb->data, skb->len);
836 /* IEs */
837 /* SSID */
838 template.ssid.header.id = WLAN_EID_SSID;
839 template.ssid.header.len = ssid_len;
840 if (ssid_len && ssid)
841 memcpy(template.ssid.ssid, ssid, ssid_len);
842 size += sizeof(struct wl12xx_ie_header) + ssid_len;
843 ptr += size;
844
845 /* Basic Rates */
846 rates = (struct wl12xx_ie_rates *)ptr;
847 rates->header.id = WLAN_EID_SUPP_RATES;
848 rates->header.len = wl1271_build_basic_rates(rates->rates, band);
849 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
850 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
851
852 /* Extended rates */
853 rates = (struct wl12xx_ie_rates *)ptr;
854 rates->header.id = WLAN_EID_EXT_SUPP_RATES;
855 rates->header.len = wl1271_build_extended_rates(rates->rates, band);
856 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
857
858 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
859 829
860 if (band == IEEE80211_BAND_2GHZ) 830 if (band == IEEE80211_BAND_2GHZ)
861 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, 831 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
862 &template, size); 832 skb->data, skb->len, 0,
833 wl->conf.tx.basic_rate);
863 else 834 else
864 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5, 835 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
865 &template, size); 836 skb->data, skb->len, 0,
837 wl->conf.tx.basic_rate_5);
838
839out:
840 dev_kfree_skb(skb);
866 return ret; 841 return ret;
867} 842}
868 843
844int wl1271_build_qos_null_data(struct wl1271 *wl)
845{
846 struct ieee80211_qos_hdr template;
847
848 memset(&template, 0, sizeof(template));
849
850 memcpy(template.addr1, wl->bssid, ETH_ALEN);
851 memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
852 memcpy(template.addr3, wl->bssid, ETH_ALEN);
853
854 template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
855 IEEE80211_STYPE_QOS_NULLFUNC |
856 IEEE80211_FCTL_TODS);
857
858 /* FIXME: not sure what priority to use here */
859 template.qos_ctrl = cpu_to_le16(0);
860
861 return wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, &template,
862 sizeof(template), 0,
863 WL1271_RATE_AUTOMATIC);
864}
865
869int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) 866int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
870{ 867{
871 struct wl1271_cmd_set_keys *cmd; 868 struct wl1271_cmd_set_keys *cmd;
@@ -976,6 +973,10 @@ int wl1271_cmd_disconnect(struct wl1271 *wl)
976 goto out_free; 973 goto out_free;
977 } 974 }
978 975
976 ret = wl1271_cmd_wait_for_event(wl, DISCONNECT_EVENT_COMPLETE_ID);
977 if (ret < 0)
978 wl1271_error("cmd disconnect event completion error");
979
979out_free: 980out_free:
980 kfree(cmd); 981 kfree(cmd);
981 982
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 2dc06c73532b..00f78b7aa384 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -33,7 +33,7 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
33 size_t res_len); 33 size_t res_len);
34int wl1271_cmd_general_parms(struct wl1271 *wl); 34int wl1271_cmd_general_parms(struct wl1271 *wl);
35int wl1271_cmd_radio_parms(struct wl1271 *wl); 35int wl1271_cmd_radio_parms(struct wl1271 *wl);
36int wl1271_cmd_join(struct wl1271 *wl); 36int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type);
37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 37int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 38int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 39int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -41,15 +41,18 @@ int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send); 41int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send);
42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 42int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
43 size_t len); 43 size_t len);
44int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 44int wl1271_cmd_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
45 u8 active_scan, u8 high_prio, u8 band, 45 const u8 *ie, size_t ie_len, u8 active_scan,
46 u8 probe_requests); 46 u8 high_prio, u8 band, u8 probe_requests);
47int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 47int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
48 void *buf, size_t buf_len); 48 void *buf, size_t buf_len, int index, u32 rates);
49int wl1271_cmd_build_null_data(struct wl1271 *wl); 49int wl1271_cmd_build_null_data(struct wl1271 *wl);
50int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid); 50int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
51int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len, 51int wl1271_cmd_build_probe_req(struct wl1271 *wl,
52 u8 band); 52 const u8 *ssid, size_t ssid_len,
53 const u8 *ie, size_t ie_len, u8 band);
54int wl1271_build_qos_null_data(struct wl1271 *wl);
55int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
53int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); 56int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
54int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 57int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
55 u8 key_size, const u8 *key, const u8 *addr, 58 u8 key_size, const u8 *key, const u8 *addr,
@@ -99,6 +102,11 @@ enum wl1271_commands {
99 102
100#define MAX_CMD_PARAMS 572 103#define MAX_CMD_PARAMS 572
101 104
105enum {
106 CMD_TEMPL_KLV_IDX_NULL_DATA = 0,
107 CMD_TEMPL_KLV_IDX_MAX = 4
108};
109
102enum cmd_templ { 110enum cmd_templ {
103 CMD_TEMPL_NULL_DATA = 0, 111 CMD_TEMPL_NULL_DATA = 0,
104 CMD_TEMPL_BEACON, 112 CMD_TEMPL_BEACON,
@@ -121,6 +129,7 @@ enum cmd_templ {
121/* unit ms */ 129/* unit ms */
122#define WL1271_COMMAND_TIMEOUT 2000 130#define WL1271_COMMAND_TIMEOUT 2000
123#define WL1271_CMD_TEMPL_MAX_SIZE 252 131#define WL1271_CMD_TEMPL_MAX_SIZE 252
132#define WL1271_EVENT_TIMEOUT 100
124 133
125struct wl1271_cmd_header { 134struct wl1271_cmd_header {
126 __le16 id; 135 __le16 id;
@@ -243,6 +252,8 @@ struct cmd_enabledisable_path {
243 u8 padding[3]; 252 u8 padding[3];
244} __attribute__ ((packed)); 253} __attribute__ ((packed));
245 254
255#define WL1271_RATE_AUTOMATIC 0
256
246struct wl1271_cmd_template_set { 257struct wl1271_cmd_template_set {
247 struct wl1271_cmd_header header; 258 struct wl1271_cmd_header header;
248 259
@@ -509,6 +520,8 @@ enum wl1271_disconnect_type {
509}; 520};
510 521
511struct wl1271_cmd_disconnect { 522struct wl1271_cmd_disconnect {
523 struct wl1271_cmd_header header;
524
512 __le32 rx_config_options; 525 __le32 rx_config_options;
513 __le32 rx_filter_options; 526 __le32 rx_filter_options;
514 527
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 6f9e75cc5640..c44307c4bcf8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -65,110 +65,344 @@ enum {
65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS, 65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
66}; 66};
67 67
68struct conf_sg_settings { 68enum {
69 CONF_HW_RXTX_RATE_MCS7 = 0,
70 CONF_HW_RXTX_RATE_MCS6,
71 CONF_HW_RXTX_RATE_MCS5,
72 CONF_HW_RXTX_RATE_MCS4,
73 CONF_HW_RXTX_RATE_MCS3,
74 CONF_HW_RXTX_RATE_MCS2,
75 CONF_HW_RXTX_RATE_MCS1,
76 CONF_HW_RXTX_RATE_MCS0,
77 CONF_HW_RXTX_RATE_54,
78 CONF_HW_RXTX_RATE_48,
79 CONF_HW_RXTX_RATE_36,
80 CONF_HW_RXTX_RATE_24,
81 CONF_HW_RXTX_RATE_22,
82 CONF_HW_RXTX_RATE_18,
83 CONF_HW_RXTX_RATE_12,
84 CONF_HW_RXTX_RATE_11,
85 CONF_HW_RXTX_RATE_9,
86 CONF_HW_RXTX_RATE_6,
87 CONF_HW_RXTX_RATE_5_5,
88 CONF_HW_RXTX_RATE_2,
89 CONF_HW_RXTX_RATE_1,
90 CONF_HW_RXTX_RATE_MAX,
91 CONF_HW_RXTX_RATE_UNSUPPORTED = 0xff
92};
93
94enum {
95 CONF_SG_DISABLE = 0,
96 CONF_SG_PROTECTIVE,
97 CONF_SG_OPPORTUNISTIC
98};
99
100enum {
69 /* 101 /*
70 * Defines the PER threshold in PPM of the BT voice of which reaching 102 * PER threshold in PPM of the BT voice
71 * this value will trigger raising the priority of the BT voice by
72 * the BT IP until next NFS sample interval time as defined in
73 * nfs_sample_interval.
74 * 103 *
75 * Unit: PER value in PPM (parts per million) 104 * Range: 0 - 10000000
76 * #Error_packets / #Total_packets 105 */
106 CONF_SG_BT_PER_THRESHOLD = 0,
77 107
78 * Range: u32 108 /*
109 * Number of consecutive RX_ACTIVE activities to override BT voice
110 * frames to ensure WLAN connection
111 *
112 * Range: 0 - 100
113 */
114 CONF_SG_HV3_MAX_OVERRIDE,
115
116 /*
117 * Defines the NFS sample interval of the BT voice (msec)
118 *
119 * Range: 0 - 65000
120 */
121 CONF_SG_BT_NFS_SAMPLE_INTERVAL,
122
123 /*
124 * Defines the load ratio of BT
125 *
126 * Range: 0 - 100 (%)
127 */
128 CONF_SG_BT_LOAD_RATIO,
129
130 /*
131 * Defines whether the SG will force WLAN host to enter/exit PSM
132 *
133 * Range: 1 - SG can force, 0 - host handles PSM
134 */
135 CONF_SG_AUTO_PS_MODE,
136
137 /*
138 * Compensation percentage of probe requests when scan initiated
139 * during BT voice/ACL link.
140 *
141 * Range: 0 - 255 (%)
142 */
143 CONF_SG_AUTO_SCAN_PROBE_REQ,
144
145 /*
146 * Compensation percentage of probe requests when active scan initiated
147 * during BT voice
148 *
149 * Range: 0 - 255 (%)
150 */
151 CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3,
152
153 /*
154 * Defines antenna configuration (single/dual antenna)
155 *
156 * Range: 0 - single antenna, 1 - dual antenna
157 */
158 CONF_SG_ANTENNA_CONFIGURATION,
159
160 /*
161 * The threshold (percent) of max consecutive beacon misses before
162 * increasing priority of beacon reception.
163 *
164 * Range: 0 - 100 (%)
165 */
166 CONF_SG_BEACON_MISS_PERCENT,
167
168 /*
169 * The rate threshold below which receiving a data frame from the AP
170 * will increase the priority of the data frame above BT traffic.
171 *
172 * Range: 0,2, 5(=5.5), 6, 9, 11, 12, 18, 24, 36, 48, 54
173 */
174 CONF_SG_RATE_ADAPT_THRESH,
175
176 /*
177 * Not used currently.
178 *
179 * Range: 0
180 */
181 CONF_SG_RATE_ADAPT_SNR,
182
183 /*
184 * Configure the min and max time BT gains the antenna
185 * in WLAN PSM / BT master basic rate
186 *
187 * Range: 0 - 255 (ms)
188 */
189 CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR,
190 CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR,
191
192 /*
193 * The time after which no new WLAN trigger frame is transmitted
194 * in WLAN PSM / BT master basic rate
195 *
196 * Range: 0 - 255 (ms)
197 */
198 CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR,
199
200 /*
201 * Configure the min and max time BT gains the antenna
202 * in WLAN PSM / BT slave basic rate
203 *
204 * Range: 0 - 255 (ms)
205 */
206 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR,
207 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR,
208
209 /*
210 * The time after which no new WLAN trigger frame is transmitted
211 * in WLAN PSM / BT slave basic rate
212 *
213 * Range: 0 - 255 (ms)
214 */
215 CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR,
216
217 /*
218 * Configure the min and max time BT gains the antenna
219 * in WLAN PSM / BT master EDR
220 *
221 * Range: 0 - 255 (ms)
222 */
223 CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR,
224 CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR,
225
226 /*
227 * The time after which no new WLAN trigger frame is transmitted
228 * in WLAN PSM / BT master EDR
229 *
230 * Range: 0 - 255 (ms)
231 */
232 CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR,
233
234 /*
235 * Configure the min and max time BT gains the antenna
236 * in WLAN PSM / BT slave EDR
237 *
238 * Range: 0 - 255 (ms)
239 */
240 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR,
241 CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR,
242
243 /*
244 * The time after which no new WLAN trigger frame is transmitted
245 * in WLAN PSM / BT slave EDR
246 *
247 * Range: 0 - 255 (ms)
248 */
249 CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR,
250
251 /*
252 * RX guard time before the beginning of a new BT voice frame during
253 * which no new WLAN trigger frame is transmitted.
254 *
255 * Range: 0 - 100000 (us)
256 */
257 CONF_SG_RXT,
258
259 /*
260 * TX guard time before the beginning of a new BT voice frame during
261 * which no new WLAN frame is transmitted.
262 *
263 * Range: 0 - 100000 (us)
264 */
265
266 CONF_SG_TXT,
267
268 /*
269 * Enable adaptive RXT/TXT algorithm. If disabled, the host values
270 * will be utilized.
271 *
272 * Range: 0 - disable, 1 - enable
273 */
274 CONF_SG_ADAPTIVE_RXT_TXT,
275
276 /*
277 * The used WLAN legacy service period during active BT ACL link
278 *
279 * Range: 0 - 255 (ms)
280 */
281 CONF_SG_PS_POLL_TIMEOUT,
282
283 /*
284 * The used WLAN UPSD service period during active BT ACL link
285 *
286 * Range: 0 - 255 (ms)
79 */ 287 */
80 u32 per_threshold; 288 CONF_SG_UPSD_TIMEOUT,
81 289
82 /* 290 /*
83 * This value is an absolute time in micro-seconds to limit the 291 * Configure the min and max time BT gains the antenna
84 * maximum scan duration compensation while in SG 292 * in WLAN Active / BT master EDR
293 *
294 * Range: 0 - 255 (ms)
85 */ 295 */
86 u32 max_scan_compensation_time; 296 CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR,
297 CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR,
87 298
88 /* Defines the PER threshold of the BT voice of which reaching this 299 /*
89 * value will trigger raising the priority of the BT voice until next 300 * The maximum time WLAN can gain the antenna for
90 * NFS sample interval time as defined in sample_interval. 301 * in WLAN Active / BT master EDR
91 * 302 *
92 * Unit: msec 303 * Range: 0 - 255 (ms)
93 * Range: 1-65000
94 */ 304 */
95 u16 nfs_sample_interval; 305 CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR,
96 306
97 /* 307 /*
98 * Defines the load ratio for the BT. 308 * Configure the min and max time BT gains the antenna
99 * The WLAN ratio is: 100 - load_ratio 309 * in WLAN Active / BT slave EDR
100 * 310 *
101 * Unit: Percent 311 * Range: 0 - 255 (ms)
102 * Range: 0-100
103 */ 312 */
104 u8 load_ratio; 313 CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR,
314 CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR,
105 315
106 /* 316 /*
107 * true - Co-ex is allowed to enter/exit P.S automatically and 317 * The maximum time WLAN can gain the antenna for
108 * transparently to the host 318 * in WLAN Active / BT slave EDR
109 * 319 *
110 * false - Co-ex is disallowed to enter/exit P.S and will trigger an 320 * Range: 0 - 255 (ms)
111 * event to the host to notify for the need to enter/exit P.S 321 */
112 * due to BT change state 322 CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR,
323
324 /*
325 * Configure the min and max time BT gains the antenna
326 * in WLAN Active / BT basic rate
327 *
328 * Range: 0 - 255 (ms)
329 */
330 CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR,
331 CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR,
332
333 /*
334 * The maximum time WLAN can gain the antenna for
335 * in WLAN Active / BT basic rate
113 * 336 *
337 * Range: 0 - 255 (ms)
114 */ 338 */
115 u8 auto_ps_mode; 339 CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR,
116 340
117 /* 341 /*
118 * This parameter defines the compensation percentage of num of probe 342 * Compensation percentage of WLAN passive scan window if initiated
119 * requests in case scan is initiated during BT voice/BT ACL 343 * during BT voice
120 * guaranteed link.
121 * 344 *
122 * Unit: Percent 345 * Range: 0 - 1000 (%)
123 * Range: 0-255 (0 - No compensation)
124 */ 346 */
125 u8 probe_req_compensation; 347 CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3,
126 348
127 /* 349 /*
128 * This parameter defines the compensation percentage of scan window 350 * Compensation percentage of WLAN passive scan window if initiated
129 * size in case scan is initiated during BT voice/BT ACL Guaranteed 351 * during BT A2DP
130 * link.
131 * 352 *
132 * Unit: Percent 353 * Range: 0 - 1000 (%)
133 * Range: 0-255 (0 - No compensation)
134 */ 354 */
135 u8 scan_window_compensation; 355 CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP,
136 356
137 /* 357 /*
138 * Defines the antenna configuration. 358 * Fixed time ensured for BT traffic to gain the antenna during WLAN
359 * passive scan.
139 * 360 *
140 * Range: 0 - Single Antenna; 1 - Dual Antenna 361 * Range: 0 - 1000 ms
141 */ 362 */
142 u8 antenna_config; 363 CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME,
143 364
144 /* 365 /*
145 * The percent out of the Max consecutive beacon miss roaming trigger 366 * Fixed time ensured for WLAN traffic to gain the antenna during WLAN
146 * which is the threshold for raising the priority of beacon 367 * passive scan.
147 * reception.
148 * 368 *
149 * Range: 1-100 369 * Range: 0 - 1000 ms
150 * N = MaxConsecutiveBeaconMiss
151 * P = coexMaxConsecutiveBeaconMissPrecent
152 * Threshold = MIN( N-1, round(N * P / 100))
153 */ 370 */
154 u8 beacon_miss_threshold; 371 CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME,
155 372
156 /* 373 /*
157 * The RX rate threshold below which rate adaptation is assumed to be 374 * Number of consequent BT voice frames not interrupted by WLAN
158 * occurring at the AP which will raise priority for ACTIVE_RX and RX
159 * SP.
160 * 375 *
161 * Range: HW_BIT_RATE_* 376 * Range: 0 - 100
162 */ 377 */
163 u32 rate_adaptation_threshold; 378 CONF_SG_HV3_MAX_SERVED,
164 379
165 /* 380 /*
166 * The SNR above which the RX rate threshold indicating AP rate 381 * Protection time of the DHCP procedure.
167 * adaptation is valid
168 * 382 *
169 * Range: -128 - 127 383 * Range: 0 - 100000 (ms)
170 */ 384 */
171 s8 rate_adaptation_snr; 385 CONF_SG_DHCP_TIME,
386
387 /*
388 * Compensation percentage of WLAN active scan window if initiated
389 * during BT A2DP
390 *
391 * Range: 0 - 1000 (%)
392 */
393 CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP,
394 CONF_SG_TEMP_PARAM_1,
395 CONF_SG_TEMP_PARAM_2,
396 CONF_SG_TEMP_PARAM_3,
397 CONF_SG_TEMP_PARAM_4,
398 CONF_SG_TEMP_PARAM_5,
399 CONF_SG_PARAMS_MAX,
400 CONF_SG_PARAMS_ALL = 0xff
401};
402
403struct conf_sg_settings {
404 __le32 params[CONF_SG_PARAMS_MAX];
405 u8 state;
172}; 406};
173 407
174enum conf_rx_queue_type { 408enum conf_rx_queue_type {
@@ -440,6 +674,19 @@ struct conf_tx_settings {
440 */ 674 */
441 u16 tx_compl_threshold; 675 u16 tx_compl_threshold;
442 676
677 /*
678 * The rate used for control messages and scanning on the 2.4GHz band
679 *
680 * Range: CONF_HW_BIT_RATE_* bit mask
681 */
682 u32 basic_rate;
683
684 /*
685 * The rate used for control messages and scanning on the 5GHz band
686 *
687 * Range: CONF_HW_BIT_RATE_* bit mask
688 */
689 u32 basic_rate_5;
443}; 690};
444 691
445enum { 692enum {
@@ -509,65 +756,6 @@ enum {
509 CONF_TRIG_EVENT_DIR_BIDIR 756 CONF_TRIG_EVENT_DIR_BIDIR
510}; 757};
511 758
512
513struct conf_sig_trigger {
514 /*
515 * The RSSI / SNR threshold value.
516 *
517 * FIXME: what is the range?
518 */
519 s16 threshold;
520
521 /*
522 * Minimum delay between two trigger events for this trigger in ms.
523 *
524 * Range: 0 - 60000
525 */
526 u16 pacing;
527
528 /*
529 * The measurement data source for this trigger.
530 *
531 * Range: CONF_TRIG_METRIC_*
532 */
533 u8 metric;
534
535 /*
536 * The trigger type of this trigger.
537 *
538 * Range: CONF_TRIG_EVENT_TYPE_*
539 */
540 u8 type;
541
542 /*
543 * The direction of the trigger.
544 *
545 * Range: CONF_TRIG_EVENT_DIR_*
546 */
547 u8 direction;
548
549 /*
550 * Hysteresis range of the trigger around the threshold (in dB)
551 *
552 * Range: u8
553 */
554 u8 hysteresis;
555
556 /*
557 * Index of the trigger rule.
558 *
559 * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1
560 */
561 u8 index;
562
563 /*
564 * Enable / disable this rule (to use for clearing rules.)
565 *
566 * Range: 1 - Enabled, 2 - Not enabled
567 */
568 u8 enable;
569};
570
571struct conf_sig_weights { 759struct conf_sig_weights {
572 760
573 /* 761 /*
@@ -686,12 +874,6 @@ struct conf_conn_settings {
686 u8 ps_poll_threshold; 874 u8 ps_poll_threshold;
687 875
688 /* 876 /*
689 * Configuration of signal (rssi/snr) triggers.
690 */
691 u8 sig_trigger_count;
692 struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS];
693
694 /*
695 * Configuration of signal average weights. 877 * Configuration of signal average weights.
696 */ 878 */
697 struct conf_sig_weights sig_weights; 879 struct conf_sig_weights sig_weights;
@@ -721,6 +903,22 @@ struct conf_conn_settings {
721 * Range 0 - 255 903 * Range 0 - 255
722 */ 904 */
723 u8 psm_entry_retries; 905 u8 psm_entry_retries;
906
907 /*
908 *
909 * Specifies the interval of the connection keep-alive null-func
910 * frame in ms.
911 *
912 * Range: 1000 - 3600000
913 */
914 u32 keep_alive_interval;
915
916 /*
917 * Maximum listen interval supported by the driver in units of beacons.
918 *
919 * Range: u16
920 */
921 u8 max_listen_interval;
724}; 922};
725 923
726enum { 924enum {
@@ -782,6 +980,43 @@ struct conf_pm_config_settings {
782 bool host_fast_wakeup_support; 980 bool host_fast_wakeup_support;
783}; 981};
784 982
983struct conf_roam_trigger_settings {
984 /*
985 * The minimum interval between two trigger events.
986 *
987 * Range: 0 - 60000 ms
988 */
989 u16 trigger_pacing;
990
991 /*
992 * The weight for rssi/beacon average calculation
993 *
994 * Range: 0 - 255
995 */
996 u8 avg_weight_rssi_beacon;
997
998 /*
999 * The weight for rssi/data frame average calculation
1000 *
1001 * Range: 0 - 255
1002 */
1003 u8 avg_weight_rssi_data;
1004
1005 /*
1006 * The weight for snr/beacon average calculation
1007 *
1008 * Range: 0 - 255
1009 */
1010 u8 avg_weight_snr_beacon;
1011
1012 /*
1013 * The weight for snr/data frame average calculation
1014 *
1015 * Range: 0 - 255
1016 */
1017 u8 avg_weight_snr_data;
1018};
1019
785struct conf_drv_settings { 1020struct conf_drv_settings {
786 struct conf_sg_settings sg; 1021 struct conf_sg_settings sg;
787 struct conf_rx_settings rx; 1022 struct conf_rx_settings rx;
@@ -790,6 +1025,7 @@ struct conf_drv_settings {
790 struct conf_init_settings init; 1025 struct conf_init_settings init;
791 struct conf_itrim_settings itrim; 1026 struct conf_itrim_settings itrim;
792 struct conf_pm_config_settings pm_config; 1027 struct conf_pm_config_settings pm_config;
1028 struct conf_roam_trigger_settings roam_trigger;
793}; 1029};
794 1030
795#endif 1031#endif
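Note: the beacon_miss_threshold comment in the conf.h hunk above reduces to
Threshold = MIN(N - 1, round(N * P / 100)). A minimal C sketch of that
computation follows; the helper name is hypothetical, not part of the patch,
and it assumes N >= 1:

	#include <linux/kernel.h>	/* min() */

	/* N = max consecutive beacon misses, P = percentage (1 - 100) */
	static u32 beacon_miss_prio_threshold(u32 n, u32 p)
	{
		u32 rounded = (n * p + 50) / 100;	/* round(N * P / 100) */

		return min(n - 1, rounded);
	}

With N = 10 and the default CONF_SG_BEACON_MISS_PERCENT of 60 this gives
min(9, 6) = 6 missed beacons.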
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index 3f7ff8d0cf5a..c239ef4d0b8d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -29,6 +29,7 @@
29#include "wl1271.h" 29#include "wl1271.h"
30#include "wl1271_acx.h" 30#include "wl1271_acx.h"
31#include "wl1271_ps.h" 31#include "wl1271_ps.h"
32#include "wl1271_io.h"
32 33
33/* ms */ 34/* ms */
34#define WL1271_DEBUGFS_STATS_LIFETIME 1000 35#define WL1271_DEBUGFS_STATS_LIFETIME 1000
@@ -277,13 +278,10 @@ static ssize_t gpio_power_write(struct file *file,
277 goto out; 278 goto out;
278 } 279 }
279 280
280 if (value) { 281 if (value)
281 wl->set_power(true); 282 wl1271_power_on(wl);
282 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags); 283 else
283 } else { 284 wl1271_power_off(wl);
284 wl->set_power(false);
285 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
286 }
287 285
288out: 286out:
289 mutex_unlock(&wl->mutex); 287 mutex_unlock(&wl->mutex);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index 7468ef10194b..cf37aa6eb137 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -23,7 +23,6 @@
23 23
24#include "wl1271.h" 24#include "wl1271.h"
25#include "wl1271_reg.h" 25#include "wl1271_reg.h"
26#include "wl1271_spi.h"
27#include "wl1271_io.h" 26#include "wl1271_io.h"
28#include "wl1271_event.h" 27#include "wl1271_event.h"
29#include "wl1271_ps.h" 28#include "wl1271_ps.h"
@@ -32,34 +31,24 @@
32static int wl1271_event_scan_complete(struct wl1271 *wl, 31static int wl1271_event_scan_complete(struct wl1271 *wl,
33 struct event_mailbox *mbox) 32 struct event_mailbox *mbox)
34{ 33{
35 int size = sizeof(struct wl12xx_probe_req_template);
36 wl1271_debug(DEBUG_EVENT, "status: 0x%x", 34 wl1271_debug(DEBUG_EVENT, "status: 0x%x",
37 mbox->scheduled_scan_status); 35 mbox->scheduled_scan_status);
38 36
39 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) { 37 if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
40 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) { 38 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
41 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
42 NULL, size);
43 /* 2.4 GHz band scanned, scan 5 GHz band, pretend 39 /* 2.4 GHz band scanned, scan 5 GHz band, pretend
44 * to the wl1271_cmd_scan function that we are not 40 * to the wl1271_cmd_scan function that we are not
45 * scanning as it checks that. 41 * scanning as it checks that.
46 */ 42 */
47 clear_bit(WL1271_FLAG_SCANNING, &wl->flags); 43 clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
44 /* FIXME: ie missing! */
48 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len, 45 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
46 NULL, 0,
49 wl->scan.active, 47 wl->scan.active,
50 wl->scan.high_prio, 48 wl->scan.high_prio,
51 WL1271_SCAN_BAND_5_GHZ, 49 WL1271_SCAN_BAND_5_GHZ,
52 wl->scan.probe_requests); 50 wl->scan.probe_requests);
53 } else { 51 } else {
54 if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ)
55 wl1271_cmd_template_set(wl,
56 CMD_TEMPL_CFG_PROBE_REQ_2_4,
57 NULL, size);
58 else
59 wl1271_cmd_template_set(wl,
60 CMD_TEMPL_CFG_PROBE_REQ_5,
61 NULL, size);
62
63 mutex_unlock(&wl->mutex); 52 mutex_unlock(&wl->mutex);
64 ieee80211_scan_completed(wl->hw, false); 53 ieee80211_scan_completed(wl->hw, false);
65 mutex_lock(&wl->mutex); 54 mutex_lock(&wl->mutex);
@@ -92,16 +81,9 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
92 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, 81 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
93 true); 82 true);
94 } else { 83 } else {
95 wl1271_error("PSM entry failed, giving up.\n"); 84 wl1271_info("No ack to nullfunc from AP.");
96 /* FIXME: this may need to be reconsidered. for now it
97 is not possible to indicate to the mac80211
98 afterwards that PSM entry failed. To maximize
99 functionality (receiving data and remaining
100 associated) make sure that we are in sync with the
101 AP in regard of PSM mode. */
102 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
103 false);
104 wl->psm_entry_retry = 0; 85 wl->psm_entry_retry = 0;
86 *beacon_loss = true;
105 } 87 }
106 break; 88 break;
107 case EVENT_ENTER_POWER_SAVE_SUCCESS: 89 case EVENT_ENTER_POWER_SAVE_SUCCESS:
@@ -143,6 +125,24 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
143 return ret; 125 return ret;
144} 126}
145 127
128static void wl1271_event_rssi_trigger(struct wl1271 *wl,
129 struct event_mailbox *mbox)
130{
131 enum nl80211_cqm_rssi_threshold_event event;
132 s8 metric = mbox->rssi_snr_trigger_metric[0];
133
134 wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
135
136 if (metric <= wl->rssi_thold)
137 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
138 else
139 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
140
141 if (event != wl->last_rssi_event)
142 ieee80211_cqm_rssi_notify(wl->vif, event, GFP_KERNEL);
143 wl->last_rssi_event = event;
144}
145
146static void wl1271_event_mbox_dump(struct event_mailbox *mbox) 146static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
147{ 147{
148 wl1271_debug(DEBUG_EVENT, "MBOX DUMP:"); 148 wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -172,10 +172,13 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
172 * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon 172 * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
173 * filtering) is enabled. Without PSM, the stack will receive all 173 * filtering) is enabled. Without PSM, the stack will receive all
174 * beacons and can detect beacon loss by itself. 174 * beacons and can detect beacon loss by itself.
175 *
176 * As there's a possibility that the driver disables PSM before receiving
177 * BSS_LOSE_EVENT, beacon loss has to be reported to the stack.
178 *
175 */ 179 */
176 if (vector & BSS_LOSE_EVENT_ID && 180 if (vector & BSS_LOSE_EVENT_ID) {
177 test_bit(WL1271_FLAG_PSM, &wl->flags)) { 181 wl1271_info("Beacon loss detected.");
178 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
179 182
180 /* indicate to the stack, that beacons have been lost */ 183 /* indicate to the stack, that beacons have been lost */
181 beacon_loss = true; 184 beacon_loss = true;
@@ -188,17 +191,15 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
188 return ret; 191 return ret;
189 } 192 }
190 193
191 if (wl->vif && beacon_loss) { 194 if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID) {
192 /* Obviously, it's dangerous to release the mutex while 195 wl1271_debug(DEBUG_EVENT, "RSSI_SNR_TRIGGER_0_EVENT");
193 we are holding many of the variables in the wl struct. 196 if (wl->vif)
194 That's why it's done last in the function, and care must 197 wl1271_event_rssi_trigger(wl, mbox);
195 be taken that nothing more is done after this function
196 returns. */
197 mutex_unlock(&wl->mutex);
198 ieee80211_beacon_loss(wl->vif);
199 mutex_lock(&wl->mutex);
200 } 198 }
201 199
200 if (wl->vif && beacon_loss)
201 ieee80211_connection_loss(wl->vif);
202
202 return 0; 203 return 0;
203} 204}
204 205
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 278f9206aa56..58371008f270 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -38,6 +38,14 @@
38 */ 38 */
39 39
40enum { 40enum {
41 RSSI_SNR_TRIGGER_0_EVENT_ID = BIT(0),
42 RSSI_SNR_TRIGGER_1_EVENT_ID = BIT(1),
43 RSSI_SNR_TRIGGER_2_EVENT_ID = BIT(2),
44 RSSI_SNR_TRIGGER_3_EVENT_ID = BIT(3),
45 RSSI_SNR_TRIGGER_4_EVENT_ID = BIT(4),
46 RSSI_SNR_TRIGGER_5_EVENT_ID = BIT(5),
47 RSSI_SNR_TRIGGER_6_EVENT_ID = BIT(6),
48 RSSI_SNR_TRIGGER_7_EVENT_ID = BIT(7),
41 MEASUREMENT_START_EVENT_ID = BIT(8), 49 MEASUREMENT_START_EVENT_ID = BIT(8),
42 MEASUREMENT_COMPLETE_EVENT_ID = BIT(9), 50 MEASUREMENT_COMPLETE_EVENT_ID = BIT(9),
43 SCAN_COMPLETE_EVENT_ID = BIT(10), 51 SCAN_COMPLETE_EVENT_ID = BIT(10),
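Note: the eight RSSI_SNR_TRIGGER_*_EVENT_ID values added above occupy bits
0 - 7 in trigger-index order, and only trigger 0 is consumed by
wl1271_event_rssi_trigger() in wl1271_event.c, matching the single CQM RSSI
threshold that mac80211 exposes. The mapping can be written generically;
this macro is illustrative and not part of the patch:

	/* event bit for RSSI/SNR trigger i, 0 <= i <= 7 (illustrative) */
	#define RSSI_SNR_TRIGGER_EVENT_ID(i)	BIT(i)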
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index d189e8fe05a6..4447af1557f5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -52,50 +52,65 @@ static int wl1271_init_hwenc_config(struct wl1271 *wl)
52 52
53int wl1271_init_templates_config(struct wl1271 *wl) 53int wl1271_init_templates_config(struct wl1271 *wl)
54{ 54{
55 int ret; 55 int ret, i;
56 56
57 /* send empty templates for fw memory reservation */ 57 /* send empty templates for fw memory reservation */
58 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL, 58 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
59 sizeof(struct wl12xx_probe_req_template)); 59 sizeof(struct wl12xx_probe_req_template),
60 0, WL1271_RATE_AUTOMATIC);
60 if (ret < 0) 61 if (ret < 0)
61 return ret; 62 return ret;
62 63
63 if (wl1271_11a_enabled()) { 64 if (wl1271_11a_enabled()) {
65 size_t size = sizeof(struct wl12xx_probe_req_template);
64 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5, 66 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
65 NULL, 67 NULL, size, 0,
66 sizeof(struct wl12xx_probe_req_template)); 68 WL1271_RATE_AUTOMATIC);
67 if (ret < 0) 69 if (ret < 0)
68 return ret; 70 return ret;
69 } 71 }
70 72
71 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, 73 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
72 sizeof(struct wl12xx_null_data_template)); 74 sizeof(struct wl12xx_null_data_template),
75 0, WL1271_RATE_AUTOMATIC);
73 if (ret < 0) 76 if (ret < 0)
74 return ret; 77 return ret;
75 78
76 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, NULL, 79 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, NULL,
77 sizeof(struct wl12xx_ps_poll_template)); 80 sizeof(struct wl12xx_ps_poll_template),
81 0, WL1271_RATE_AUTOMATIC);
78 if (ret < 0) 82 if (ret < 0)
79 return ret; 83 return ret;
80 84
81 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL, 85 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_QOS_NULL_DATA, NULL,
82 sizeof 86 sizeof
83 (struct wl12xx_qos_null_data_template)); 87 (struct wl12xx_qos_null_data_template),
88 0, WL1271_RATE_AUTOMATIC);
84 if (ret < 0) 89 if (ret < 0)
85 return ret; 90 return ret;
86 91
87 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL, 92 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, NULL,
88 sizeof 93 sizeof
89 (struct wl12xx_probe_resp_template)); 94 (struct wl12xx_probe_resp_template),
95 0, WL1271_RATE_AUTOMATIC);
90 if (ret < 0) 96 if (ret < 0)
91 return ret; 97 return ret;
92 98
93 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL, 99 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, NULL,
94 sizeof 100 sizeof
95 (struct wl12xx_beacon_template)); 101 (struct wl12xx_beacon_template),
102 0, WL1271_RATE_AUTOMATIC);
96 if (ret < 0) 103 if (ret < 0)
97 return ret; 104 return ret;
98 105
106 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
107 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
108 WL1271_CMD_TEMPL_MAX_SIZE, i,
109 WL1271_RATE_AUTOMATIC);
110 if (ret < 0)
111 return ret;
112 }
113
99 return 0; 114 return 0;
100} 115}
101 116
@@ -161,11 +176,11 @@ int wl1271_init_pta(struct wl1271 *wl)
161{ 176{
162 int ret; 177 int ret;
163 178
164 ret = wl1271_acx_sg_enable(wl); 179 ret = wl1271_acx_sg_cfg(wl);
165 if (ret < 0) 180 if (ret < 0)
166 return ret; 181 return ret;
167 182
168 ret = wl1271_acx_sg_cfg(wl); 183 ret = wl1271_acx_sg_enable(wl, wl->sg_enabled);
169 if (ret < 0) 184 if (ret < 0)
170 return ret; 185 return ret;
171 186
@@ -237,7 +252,7 @@ int wl1271_hw_init(struct wl1271 *wl)
237 goto out_free_memmap; 252 goto out_free_memmap;
238 253
239 /* Initialize connection monitoring thresholds */ 254 /* Initialize connection monitoring thresholds */
240 ret = wl1271_acx_conn_monit_params(wl); 255 ret = wl1271_acx_conn_monit_params(wl, false);
241 if (ret < 0) 256 if (ret < 0)
242 goto out_free_memmap; 257 goto out_free_memmap;
243 258
@@ -325,6 +340,24 @@ int wl1271_hw_init(struct wl1271 *wl)
325 if (ret < 0) 340 if (ret < 0)
326 goto out_free_memmap; 341 goto out_free_memmap;
327 342
343 /* disable all keep-alive templates */
344 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
345 ret = wl1271_acx_keep_alive_config(wl, i,
346 ACX_KEEP_ALIVE_TPL_INVALID);
347 if (ret < 0)
348 goto out_free_memmap;
349 }
350
351 /* disable the keep-alive feature */
352 ret = wl1271_acx_keep_alive_mode(wl, false);
353 if (ret < 0)
354 goto out_free_memmap;
355
356 /* Configure rssi/snr averaging weights */
357 ret = wl1271_acx_rssi_snr_avg_weights(wl);
358 if (ret < 0)
359 goto out_free_memmap;
360
328 return 0; 361 return 0;
329 362
330 out_free_memmap: 363 out_free_memmap:
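Note: the hw-init additions above first mark every keep-alive template slot
invalid and then switch the feature off entirely. The converse path (run
elsewhere, e.g. once associated) would validate a slot and enable the mode;
a sketch using the same helpers, where ACX_KEEP_ALIVE_TPL_VALID is an
assumed counterpart of the invalid constant:

	/* assumed enable path, mirroring the disable loop above */
	ret = wl1271_acx_keep_alive_mode(wl, true);
	if (ret < 0)
		return ret;

	ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
					   ACX_KEEP_ALIVE_TPL_VALID);
	if (ret < 0)
		return ret;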
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/wl1271_io.c
index 5cd94d5666c2..c8759acef131 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.c
+++ b/drivers/net/wireless/wl12xx/wl1271_io.c
@@ -28,30 +28,29 @@
28 28
29#include "wl1271.h" 29#include "wl1271.h"
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "wl1271_spi.h"
32#include "wl1271_io.h" 31#include "wl1271_io.h"
33 32
34static int wl1271_translate_addr(struct wl1271 *wl, int addr) 33#define OCP_CMD_LOOP 32
34
35#define OCP_CMD_WRITE 0x1
36#define OCP_CMD_READ 0x2
37
38#define OCP_READY_MASK BIT(18)
39#define OCP_STATUS_MASK (BIT(16) | BIT(17))
40
41#define OCP_STATUS_NO_RESP 0x00000
42#define OCP_STATUS_OK 0x10000
43#define OCP_STATUS_REQ_FAILED 0x20000
44#define OCP_STATUS_RESP_ERROR 0x30000
45
46void wl1271_disable_interrupts(struct wl1271 *wl)
35{ 47{
36 /* 48 wl->if_ops->disable_irq(wl);
37 * To translate, first check to which window of addresses the 49}
38 * particular address belongs. Then subtract the starting address 50
39 * of that window from the address. Then, add offset of the 51void wl1271_enable_interrupts(struct wl1271 *wl)
40 * translated region. 52{
41 * 53 wl->if_ops->enable_irq(wl);
42 * The translated regions occur next to each other in physical device
43 * memory, so just add the sizes of the preceding address regions to
44 * get the offset to the new region.
45 *
46 * Currently, only the first two regions are addressed, and the
47 * assumption is that all addresses will fall into either of those
48 * two.
49 */
50 if ((addr >= wl->part.reg.start) &&
51 (addr < wl->part.reg.start + wl->part.reg.size))
52 return addr - wl->part.reg.start + wl->part.mem.size;
53 else
54 return addr - wl->part.mem.start;
55} 54}
56 55
57/* Set the SPI partitions to access the chip addresses 56/* Set the SPI partitions to access the chip addresses
@@ -117,54 +116,12 @@ int wl1271_set_partition(struct wl1271 *wl,
117 116
118void wl1271_io_reset(struct wl1271 *wl) 117void wl1271_io_reset(struct wl1271 *wl)
119{ 118{
120 wl1271_spi_reset(wl); 119 wl->if_ops->reset(wl);
121} 120}
122 121
123void wl1271_io_init(struct wl1271 *wl) 122void wl1271_io_init(struct wl1271 *wl)
124{ 123{
125 wl1271_spi_init(wl); 124 wl->if_ops->init(wl);
126}
127
128void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
129 size_t len, bool fixed)
130{
131 wl1271_spi_raw_write(wl, addr, buf, len, fixed);
132}
133
134void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
135 size_t len, bool fixed)
136{
137 wl1271_spi_raw_read(wl, addr, buf, len, fixed);
138}
139
140void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
141 bool fixed)
142{
143 int physical;
144
145 physical = wl1271_translate_addr(wl, addr);
146
147 wl1271_spi_raw_read(wl, physical, buf, len, fixed);
148}
149
150void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
151 bool fixed)
152{
153 int physical;
154
155 physical = wl1271_translate_addr(wl, addr);
156
157 wl1271_spi_raw_write(wl, physical, buf, len, fixed);
158}
159
160u32 wl1271_read32(struct wl1271 *wl, int addr)
161{
162 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
163}
164
165void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
166{
167 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
168} 125}
169 126
170void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val) 127void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h
index fa9a0b35788f..d8837ef0bb40 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.h
+++ b/drivers/net/wireless/wl12xx/wl1271_io.h
@@ -25,31 +25,49 @@
25#ifndef __WL1271_IO_H__ 25#ifndef __WL1271_IO_H__
26#define __WL1271_IO_H__ 26#define __WL1271_IO_H__
27 27
28#include "wl1271_reg.h"
29
30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
31
32#define HW_PARTITION_REGISTERS_ADDR 0x1FFC0
33#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
34#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
35#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
40
41#define HW_ACCESS_REGISTER_SIZE 4
42
43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
44
28struct wl1271; 45struct wl1271;
29 46
47void wl1271_disable_interrupts(struct wl1271 *wl);
48void wl1271_enable_interrupts(struct wl1271 *wl);
49
30void wl1271_io_reset(struct wl1271 *wl); 50void wl1271_io_reset(struct wl1271 *wl);
31void wl1271_io_init(struct wl1271 *wl); 51void wl1271_io_init(struct wl1271 *wl);
32 52
33/* Raw target IO, address is not translated */ 53static inline struct device *wl1271_wl_to_dev(struct wl1271 *wl)
34void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf, 54{
35 size_t len, bool fixed); 55 return wl->if_ops->dev(wl);
36void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf, 56}
37 size_t len, bool fixed);
38 57
39/* Translated target IO */
40void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
41 bool fixed);
42void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
43 bool fixed);
44u32 wl1271_read32(struct wl1271 *wl, int addr);
45void wl1271_write32(struct wl1271 *wl, int addr, u32 val);
46 58
47/* Top Register IO */ 59/* Raw target IO, address is not translated */
48void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val); 60static inline void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
49u16 wl1271_top_reg_read(struct wl1271 *wl, int addr); 61 size_t len, bool fixed)
62{
63 wl->if_ops->write(wl, addr, buf, len, fixed);
64}
50 65
51int wl1271_set_partition(struct wl1271 *wl, 66static inline void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
52 struct wl1271_partition_set *p); 67 size_t len, bool fixed)
68{
69 wl->if_ops->read(wl, addr, buf, len, fixed);
70}
53 71
54static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr) 72static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
55{ 73{
@@ -65,4 +83,87 @@ static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
65 wl1271_raw_write(wl, addr, &wl->buffer_32, 83 wl1271_raw_write(wl, addr, &wl->buffer_32,
66 sizeof(wl->buffer_32), false); 84 sizeof(wl->buffer_32), false);
67} 85}
86
87/* Translated target IO */
88static inline int wl1271_translate_addr(struct wl1271 *wl, int addr)
89{
90 /*
91 * To translate, first check to which window of addresses the
92 * particular address belongs. Then subtract the starting address
93 * of that window from the address. Then, add offset of the
94 * translated region.
95 *
96 * The translated regions occur next to each other in physical device
97 * memory, so just add the sizes of the preceding address regions to
98 * get the offset to the new region.
99 *
101 * Currently, only the first two regions are addressed, and the
101 * assumption is that all addresses will fall into either of those
102 * two.
103 */
104 if ((addr >= wl->part.reg.start) &&
105 (addr < wl->part.reg.start + wl->part.reg.size))
106 return addr - wl->part.reg.start + wl->part.mem.size;
107 else
108 return addr - wl->part.mem.start;
109}
110
111static inline void wl1271_read(struct wl1271 *wl, int addr, void *buf,
112 size_t len, bool fixed)
113{
114 int physical;
115
116 physical = wl1271_translate_addr(wl, addr);
117
118 wl1271_raw_read(wl, physical, buf, len, fixed);
119}
120
121static inline void wl1271_write(struct wl1271 *wl, int addr, void *buf,
122 size_t len, bool fixed)
123{
124 int physical;
125
126 physical = wl1271_translate_addr(wl, addr);
127
128 wl1271_raw_write(wl, physical, buf, len, fixed);
129}
130
131static inline u32 wl1271_read32(struct wl1271 *wl, int addr)
132{
133 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
134}
135
136static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
137{
138 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
139}
140
141static inline void wl1271_power_off(struct wl1271 *wl)
142{
143 wl->if_ops->power(wl, false);
144 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
145}
146
147static inline void wl1271_power_on(struct wl1271 *wl)
148{
149 wl->if_ops->power(wl, true);
150 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
151}
152
153
154/* Top Register IO */
155void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
156u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
157
158int wl1271_set_partition(struct wl1271 *wl,
159 struct wl1271_partition_set *p);
160
161/* Functions from wl1271_main.c */
162
163int wl1271_register_hw(struct wl1271 *wl);
164void wl1271_unregister_hw(struct wl1271 *wl);
165int wl1271_init_ieee80211(struct wl1271 *wl);
166struct ieee80211_hw *wl1271_alloc_hw(void);
167int wl1271_free_hw(struct wl1271 *wl);
168
68#endif 169#endif
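Note: a worked example of the window translation now inlined as
wl1271_translate_addr(); the partition values below are illustrative, not
taken from the patch:

	/*
	 * Assume: part.mem.start = 0x40000, part.mem.size = 0x14fc0,
	 *         part.reg.start = 0x300000.
	 *
	 * A register address is rebased to just after the memory window:
	 *   0x305674 -> 0x305674 - 0x300000 + 0x14fc0 = 0x1a634
	 *
	 * A memory address is rebased to zero:
	 *   0x44000  -> 0x44000 - 0x40000 = 0x4000
	 */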
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 65a1aeba2419..814f300c3f17 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -22,23 +22,19 @@
22 */ 22 */
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/interrupt.h>
27#include <linux/firmware.h> 25#include <linux/firmware.h>
28#include <linux/delay.h> 26#include <linux/delay.h>
29#include <linux/irq.h>
30#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
31#include <linux/crc32.h> 28#include <linux/crc32.h>
32#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
33#include <linux/vmalloc.h> 30#include <linux/vmalloc.h>
34#include <linux/spi/wl12xx.h>
35#include <linux/inetdevice.h> 31#include <linux/inetdevice.h>
32#include <linux/platform_device.h>
36#include <linux/slab.h> 33#include <linux/slab.h>
37 34
38#include "wl1271.h" 35#include "wl1271.h"
39#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
40#include "wl1271_reg.h" 37#include "wl1271_reg.h"
41#include "wl1271_spi.h"
42#include "wl1271_io.h" 38#include "wl1271_io.h"
43#include "wl1271_event.h" 39#include "wl1271_event.h"
44#include "wl1271_tx.h" 40#include "wl1271_tx.h"
@@ -54,17 +50,57 @@
54 50
55static struct conf_drv_settings default_conf = { 51static struct conf_drv_settings default_conf = {
56 .sg = { 52 .sg = {
57 .per_threshold = 7500, 53 .params = {
58 .max_scan_compensation_time = 120000, 54 [CONF_SG_BT_PER_THRESHOLD] = 7500,
59 .nfs_sample_interval = 400, 55 [CONF_SG_HV3_MAX_OVERRIDE] = 0,
60 .load_ratio = 50, 56 [CONF_SG_BT_NFS_SAMPLE_INTERVAL] = 400,
61 .auto_ps_mode = 0, 57 [CONF_SG_BT_LOAD_RATIO] = 50,
62 .probe_req_compensation = 170, 58 [CONF_SG_AUTO_PS_MODE] = 0,
63 .scan_window_compensation = 50, 59 [CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
64 .antenna_config = 0, 60 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
65 .beacon_miss_threshold = 60, 61 [CONF_SG_ANTENNA_CONFIGURATION] = 0,
66 .rate_adaptation_threshold = CONF_HW_BIT_RATE_12MBPS, 62 [CONF_SG_BEACON_MISS_PERCENT] = 60,
67 .rate_adaptation_snr = 0 63 [CONF_SG_RATE_ADAPT_THRESH] = 12,
64 [CONF_SG_RATE_ADAPT_SNR] = 0,
65 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_BR] = 10,
66 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_BR] = 30,
67 [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_BR] = 8,
68 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_BR] = 20,
69 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_BR] = 50,
70 /* Note: with UPSD, this should be 4 */
71 [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_BR] = 8,
72 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MIN_EDR] = 7,
73 [CONF_SG_WLAN_PS_BT_ACL_MASTER_MAX_EDR] = 25,
74 [CONF_SG_WLAN_PS_MAX_BT_ACL_MASTER_EDR] = 20,
75 /* Note: with UPSD, this should be 15 */
76 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MIN_EDR] = 8,
77 /* Note: with UPSD, this should be 50 */
78 [CONF_SG_WLAN_PS_BT_ACL_SLAVE_MAX_EDR] = 40,
79 /* Note: with UPSD, this should be 10 */
80 [CONF_SG_WLAN_PS_MAX_BT_ACL_SLAVE_EDR] = 20,
81 [CONF_SG_RXT] = 1200,
82 [CONF_SG_TXT] = 1000,
83 [CONF_SG_ADAPTIVE_RXT_TXT] = 1,
84 [CONF_SG_PS_POLL_TIMEOUT] = 10,
85 [CONF_SG_UPSD_TIMEOUT] = 10,
86 [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MIN_EDR] = 7,
87 [CONF_SG_WLAN_ACTIVE_BT_ACL_MASTER_MAX_EDR] = 15,
88 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_MASTER_EDR] = 15,
89 [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MIN_EDR] = 8,
90 [CONF_SG_WLAN_ACTIVE_BT_ACL_SLAVE_MAX_EDR] = 20,
91 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_SLAVE_EDR] = 15,
92 [CONF_SG_WLAN_ACTIVE_BT_ACL_MIN_BR] = 20,
93 [CONF_SG_WLAN_ACTIVE_BT_ACL_MAX_BR] = 50,
94 [CONF_SG_WLAN_ACTIVE_MAX_BT_ACL_BR] = 10,
95 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
96 [CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP] = 800,
97 [CONF_SG_PASSIVE_SCAN_A2DP_BT_TIME] = 75,
98 [CONF_SG_PASSIVE_SCAN_A2DP_WLAN_TIME] = 15,
99 [CONF_SG_HV3_MAX_SERVED] = 6,
100 [CONF_SG_DHCP_TIME] = 5000,
101 [CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
102 },
103 .state = CONF_SG_PROTECTIVE,
68 }, 104 },
69 .rx = { 105 .rx = {
70 .rx_msdu_life_time = 512000, 106 .rx_msdu_life_time = 512000,
@@ -81,8 +117,7 @@ static struct conf_drv_settings default_conf = {
81 .tx = { 117 .tx = {
82 .tx_energy_detection = 0, 118 .tx_energy_detection = 0,
83 .rc_conf = { 119 .rc_conf = {
84 .enabled_rates = CONF_HW_BIT_RATE_1MBPS | 120 .enabled_rates = 0,
85 CONF_HW_BIT_RATE_2MBPS,
86 .short_retry_limit = 10, 121 .short_retry_limit = 10,
87 .long_retry_limit = 10, 122 .long_retry_limit = 10,
88 .aflags = 0 123 .aflags = 0
@@ -179,11 +214,13 @@ static struct conf_drv_settings default_conf = {
179 }, 214 },
180 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD, 215 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
181 .tx_compl_timeout = 700, 216 .tx_compl_timeout = 700,
182 .tx_compl_threshold = 4 217 .tx_compl_threshold = 4,
218 .basic_rate = CONF_HW_BIT_RATE_1MBPS,
219 .basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
183 }, 220 },
184 .conn = { 221 .conn = {
185 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM, 222 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
186 .listen_interval = 0, 223 .listen_interval = 1,
187 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED, 224 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
188 .bcn_filt_ie_count = 1, 225 .bcn_filt_ie_count = 1,
189 .bcn_filt_ie = { 226 .bcn_filt_ie = {
@@ -198,38 +235,11 @@ static struct conf_drv_settings default_conf = {
198 .broadcast_timeout = 20000, 235 .broadcast_timeout = 20000,
199 .rx_broadcast_in_ps = 1, 236 .rx_broadcast_in_ps = 1,
200 .ps_poll_threshold = 20, 237 .ps_poll_threshold = 20,
201 .sig_trigger_count = 2,
202 .sig_trigger = {
203 [0] = {
204 .threshold = -75,
205 .pacing = 500,
206 .metric = CONF_TRIG_METRIC_RSSI_BEACON,
207 .type = CONF_TRIG_EVENT_TYPE_EDGE,
208 .direction = CONF_TRIG_EVENT_DIR_LOW,
209 .hysteresis = 2,
210 .index = 0,
211 .enable = 1
212 },
213 [1] = {
214 .threshold = -75,
215 .pacing = 500,
216 .metric = CONF_TRIG_METRIC_RSSI_BEACON,
217 .type = CONF_TRIG_EVENT_TYPE_EDGE,
218 .direction = CONF_TRIG_EVENT_DIR_HIGH,
219 .hysteresis = 2,
220 .index = 1,
221 .enable = 1
222 }
223 },
224 .sig_weights = {
225 .rssi_bcn_avg_weight = 10,
226 .rssi_pkt_avg_weight = 10,
227 .snr_bcn_avg_weight = 10,
228 .snr_pkt_avg_weight = 10
229 },
230 .bet_enable = CONF_BET_MODE_ENABLE, 238 .bet_enable = CONF_BET_MODE_ENABLE,
231 .bet_max_consecutive = 10, 239 .bet_max_consecutive = 10,
232 .psm_entry_retries = 3 240 .psm_entry_retries = 3,
241 .keep_alive_interval = 55000,
242 .max_listen_interval = 20,
233 }, 243 },
234 .init = { 244 .init = {
235 .radioparam = { 245 .radioparam = {
@@ -243,9 +253,32 @@ static struct conf_drv_settings default_conf = {
243 .pm_config = { 253 .pm_config = {
244 .host_clk_settling_time = 5000, 254 .host_clk_settling_time = 5000,
245 .host_fast_wakeup_support = false 255 .host_fast_wakeup_support = false
256 },
257 .roam_trigger = {
258 /* FIXME: due to firmware bug, must use value 1 for now */
259 .trigger_pacing = 1,
260 .avg_weight_rssi_beacon = 20,
261 .avg_weight_rssi_data = 10,
262 .avg_weight_snr_beacon = 20,
263 .avg_weight_snr_data = 10
246 } 264 }
247}; 265};
248 266
267static void wl1271_device_release(struct device *dev)
268{
269
270}
271
272static struct platform_device wl1271_device = {
273 .name = "wl1271",
274 .id = -1,
275
276 /* the device model insists on having a release function */
277 .dev = {
278 .release = wl1271_device_release,
279 },
280};
281
249static LIST_HEAD(wl_list); 282static LIST_HEAD(wl_list);
250 283
251static void wl1271_conf_init(struct wl1271 *wl) 284static void wl1271_conf_init(struct wl1271 *wl)
@@ -298,7 +331,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
298 goto out_free_memmap; 331 goto out_free_memmap;
299 332
300 /* Initialize connection monitoring thresholds */ 333 /* Initialize connection monitoring thresholds */
301 ret = wl1271_acx_conn_monit_params(wl); 334 ret = wl1271_acx_conn_monit_params(wl, false);
302 if (ret < 0) 335 if (ret < 0)
303 goto out_free_memmap; 336 goto out_free_memmap;
304 337
@@ -365,30 +398,14 @@ static int wl1271_plt_init(struct wl1271 *wl)
365 return ret; 398 return ret;
366} 399}
367 400
368static void wl1271_disable_interrupts(struct wl1271 *wl)
369{
370 disable_irq(wl->irq);
371}
372
373static void wl1271_power_off(struct wl1271 *wl)
374{
375 wl->set_power(false);
376 clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
377}
378
379static void wl1271_power_on(struct wl1271 *wl)
380{
381 wl->set_power(true);
382 set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
383}
384
385static void wl1271_fw_status(struct wl1271 *wl, 401static void wl1271_fw_status(struct wl1271 *wl,
386 struct wl1271_fw_status *status) 402 struct wl1271_fw_status *status)
387{ 403{
404 struct timespec ts;
388 u32 total = 0; 405 u32 total = 0;
389 int i; 406 int i;
390 407
391 wl1271_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false); 408 wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
392 409
393 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 410 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
394 "drv_rx_counter = %d, tx_results_counter = %d)", 411 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -413,14 +430,19 @@ static void wl1271_fw_status(struct wl1271 *wl,
413 ieee80211_queue_work(wl->hw, &wl->tx_work); 430 ieee80211_queue_work(wl->hw, &wl->tx_work);
414 431
415 /* update the host-chipset time offset */ 432 /* update the host-chipset time offset */
416 wl->time_offset = jiffies_to_usecs(jiffies) - 433 getnstimeofday(&ts);
417 le32_to_cpu(status->fw_localtime); 434 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
435 (s64)le32_to_cpu(status->fw_localtime);
418} 436}
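Note: the new time-offset computation avoids a 64-bit division on the
interrupt path: shifting nanoseconds right by 10 divides by 1024 instead of
1000, a scale error of about 2.3% that is harmless as long as the same
conversion is applied wherever the offset is consumed. A one-line sketch
with a hypothetical helper name:

	/* ns -> approximate microseconds: divide by 1024, not 1000 */
	static inline s64 ns_to_approx_usecs(s64 ns)
	{
		return ns >> 10;
	}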
419 437
438#define WL1271_IRQ_MAX_LOOPS 10
439
420static void wl1271_irq_work(struct work_struct *work) 440static void wl1271_irq_work(struct work_struct *work)
421{ 441{
422 int ret; 442 int ret;
423 u32 intr; 443 u32 intr;
444 int loopcount = WL1271_IRQ_MAX_LOOPS;
445 unsigned long flags;
424 struct wl1271 *wl = 446 struct wl1271 *wl =
425 container_of(work, struct wl1271, irq_work); 447 container_of(work, struct wl1271, irq_work);
426 448
@@ -428,91 +450,77 @@ static void wl1271_irq_work(struct work_struct *work)
428 450
429 wl1271_debug(DEBUG_IRQ, "IRQ work"); 451 wl1271_debug(DEBUG_IRQ, "IRQ work");
430 452
431 if (wl->state == WL1271_STATE_OFF) 453 if (unlikely(wl->state == WL1271_STATE_OFF))
432 goto out; 454 goto out;
433 455
434 ret = wl1271_ps_elp_wakeup(wl, true); 456 ret = wl1271_ps_elp_wakeup(wl, true);
435 if (ret < 0) 457 if (ret < 0)
436 goto out; 458 goto out;
437 459
438 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 460 spin_lock_irqsave(&wl->wl_lock, flags);
439 461 while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount) {
440 wl1271_fw_status(wl, wl->fw_status); 462 clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
441 intr = le32_to_cpu(wl->fw_status->intr); 463 spin_unlock_irqrestore(&wl->wl_lock, flags);
442 if (!intr) { 464 loopcount--;
443 wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); 465
444 goto out_sleep; 466 wl1271_fw_status(wl, wl->fw_status);
445 } 467 intr = le32_to_cpu(wl->fw_status->intr);
468 if (!intr) {
469 wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
470 continue;
471 }
446 472
447 intr &= WL1271_INTR_MASK; 473 intr &= WL1271_INTR_MASK;
448 474
449 if (intr & WL1271_ACX_INTR_EVENT_A) { 475 if (intr & WL1271_ACX_INTR_DATA) {
450 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A"); 476 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
451 wl1271_event_handle(wl, 0);
452 }
453 477
454 if (intr & WL1271_ACX_INTR_EVENT_B) { 478 /* check for tx results */
455 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B"); 479 if (wl->fw_status->tx_results_counter !=
456 wl1271_event_handle(wl, 1); 480 (wl->tx_results_count & 0xff))
457 } 481 wl1271_tx_complete(wl);
458 482
459 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 483 wl1271_rx(wl, wl->fw_status);
460 wl1271_debug(DEBUG_IRQ, 484 }
461 "WL1271_ACX_INTR_INIT_COMPLETE");
462 485
463 if (intr & WL1271_ACX_INTR_HW_AVAILABLE) 486 if (intr & WL1271_ACX_INTR_EVENT_A) {
464 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); 487 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
488 wl1271_event_handle(wl, 0);
489 }
465 490
466 if (intr & WL1271_ACX_INTR_DATA) { 491 if (intr & WL1271_ACX_INTR_EVENT_B) {
467 u8 tx_res_cnt = wl->fw_status->tx_results_counter - 492 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
468 wl->tx_results_count; 493 wl1271_event_handle(wl, 1);
494 }
469 495
470 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 496 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
497 wl1271_debug(DEBUG_IRQ,
498 "WL1271_ACX_INTR_INIT_COMPLETE");
471 499
472 /* check for tx results */ 500 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
473 if (tx_res_cnt) 501 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
474 wl1271_tx_complete(wl, tx_res_cnt);
475 502
476 wl1271_rx(wl, wl->fw_status); 503 spin_lock_irqsave(&wl->wl_lock, flags);
477 } 504 }
478 505
479out_sleep: 506 if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
480 wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, 507 ieee80211_queue_work(wl->hw, &wl->irq_work);
481 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 508 else
509 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
510 spin_unlock_irqrestore(&wl->wl_lock, flags);
511
482 wl1271_ps_elp_sleep(wl); 512 wl1271_ps_elp_sleep(wl);
483 513
484out: 514out:
485 mutex_unlock(&wl->mutex); 515 mutex_unlock(&wl->mutex);
486} 516}
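Note: the reworked IRQ work function is a bounded-drain loop: it keeps
processing while the hard-IRQ path re-arms WL1271_FLAG_IRQ_PENDING, up to
WL1271_IRQ_MAX_LOOPS iterations, and the pending/running flags are only
touched under wl_lock. Condensed to its skeleton (same names as the hunk
above):

	spin_lock_irqsave(&wl->wl_lock, flags);
	while (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags) && loopcount--) {
		clear_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
		spin_unlock_irqrestore(&wl->wl_lock, flags);

		/* ... read fw status, complete tx, hand frames to rx ... */

		spin_lock_irqsave(&wl->wl_lock, flags);
	}
	if (test_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->irq_work);	/* budget spent */
	else
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	spin_unlock_irqrestore(&wl->wl_lock, flags);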
487 517
488static irqreturn_t wl1271_irq(int irq, void *cookie)
489{
490 struct wl1271 *wl;
491 unsigned long flags;
492
493 wl1271_debug(DEBUG_IRQ, "IRQ");
494
495 wl = cookie;
496
497 /* complete the ELP completion */
498 spin_lock_irqsave(&wl->wl_lock, flags);
499 if (wl->elp_compl) {
500 complete(wl->elp_compl);
501 wl->elp_compl = NULL;
502 }
503
504 ieee80211_queue_work(wl->hw, &wl->irq_work);
505 spin_unlock_irqrestore(&wl->wl_lock, flags);
506
507 return IRQ_HANDLED;
508}
509
510static int wl1271_fetch_firmware(struct wl1271 *wl) 518static int wl1271_fetch_firmware(struct wl1271 *wl)
511{ 519{
512 const struct firmware *fw; 520 const struct firmware *fw;
513 int ret; 521 int ret;
514 522
515 ret = request_firmware(&fw, WL1271_FW_NAME, &wl->spi->dev); 523 ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
516 524
517 if (ret < 0) { 525 if (ret < 0) {
518 wl1271_error("could not get firmware: %d", ret); 526 wl1271_error("could not get firmware: %d", ret);
@@ -545,46 +553,12 @@ out:
545 return ret; 553 return ret;
546} 554}
547 555
548static int wl1271_update_mac_addr(struct wl1271 *wl)
549{
550 int ret = 0;
551 u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
552
553 /* get mac address from the NVS */
554 wl->mac_addr[0] = nvs_ptr[11];
555 wl->mac_addr[1] = nvs_ptr[10];
556 wl->mac_addr[2] = nvs_ptr[6];
557 wl->mac_addr[3] = nvs_ptr[5];
558 wl->mac_addr[4] = nvs_ptr[4];
559 wl->mac_addr[5] = nvs_ptr[3];
560
561 /* FIXME: if it is a zero-address, we should bail out. Now, instead,
562 we randomize an address */
563 if (is_zero_ether_addr(wl->mac_addr)) {
564 static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
565 memcpy(wl->mac_addr, nokia_oui, 3);
566 get_random_bytes(wl->mac_addr + 3, 3);
567
568 /* update this address to the NVS */
569 nvs_ptr[11] = wl->mac_addr[0];
570 nvs_ptr[10] = wl->mac_addr[1];
571 nvs_ptr[6] = wl->mac_addr[2];
572 nvs_ptr[5] = wl->mac_addr[3];
573 nvs_ptr[4] = wl->mac_addr[4];
574 nvs_ptr[3] = wl->mac_addr[5];
575 }
576
577 SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
578
579 return ret;
580}
581
582static int wl1271_fetch_nvs(struct wl1271 *wl) 556static int wl1271_fetch_nvs(struct wl1271 *wl)
583{ 557{
584 const struct firmware *fw; 558 const struct firmware *fw;
585 int ret; 559 int ret;
586 560
587 ret = request_firmware(&fw, WL1271_NVS_NAME, &wl->spi->dev); 561 ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl));
588 562
589 if (ret < 0) { 563 if (ret < 0) {
590 wl1271_error("could not get nvs file: %d", ret); 564 wl1271_error("could not get nvs file: %d", ret);
@@ -608,8 +582,6 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
608 582
609 memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file)); 583 memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
610 584
611 ret = wl1271_update_mac_addr(wl);
612
613out: 585out:
614 release_firmware(fw); 586 release_firmware(fw);
615 587
@@ -826,15 +798,13 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
826 * The workqueue is slow to process the tx_queue and we need stop 798 * The workqueue is slow to process the tx_queue and we need stop
827 * the queue here, otherwise the queue will get too long. 799 * the queue here, otherwise the queue will get too long.
828 */ 800 */
829 if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_MAX_LENGTH) { 801 if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
830 ieee80211_stop_queues(wl->hw); 802 wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
831 803
832 /* 804 spin_lock_irqsave(&wl->wl_lock, flags);
833 * FIXME: this is racy, the variable is not properly 805 ieee80211_stop_queues(wl->hw);
834 * protected. Maybe fix this by removing the stupid
835 * variable altogether and checking the real queue state?
836 */
837 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags); 806 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
807 spin_unlock_irqrestore(&wl->wl_lock, flags);
838 } 808 }
839 809
840 return NETDEV_TX_OK; 810 return NETDEV_TX_OK;
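Note: stopping the queues and setting WL1271_FLAG_TX_QUEUE_STOPPED now
happen under wl_lock, which closes the race the removed FIXME pointed out.
The wake side is not in this hunk; an assumed sketch of it, where
WL1271_TX_QUEUE_LOW_WATERMARK is a hypothetical drain threshold:

	spin_lock_irqsave(&wl->wl_lock, flags);
	if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
	    skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
		ieee80211_wake_queues(wl->hw);
		clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);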
@@ -929,13 +899,60 @@ static struct notifier_block wl1271_dev_notifier = {
929 899
930static int wl1271_op_start(struct ieee80211_hw *hw) 900static int wl1271_op_start(struct ieee80211_hw *hw)
931{ 901{
902 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
903
904 /*
905 * We have to delay the booting of the hardware because
906 * we need to know the local MAC address before downloading and
907 * initializing the firmware. The MAC address cannot be changed
908 * after boot, and without the proper MAC address, the firmware
909 * will not function properly.
910 *
911 * The MAC address is first known when the corresponding interface
912 * is added. That is where we will initialize the hardware.
913 */
914
915 return 0;
916}
917
918static void wl1271_op_stop(struct ieee80211_hw *hw)
919{
920 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
921}
922
923static int wl1271_op_add_interface(struct ieee80211_hw *hw,
924 struct ieee80211_vif *vif)
925{
932 struct wl1271 *wl = hw->priv; 926 struct wl1271 *wl = hw->priv;
933 int retries = WL1271_BOOT_RETRIES; 927 int retries = WL1271_BOOT_RETRIES;
934 int ret = 0; 928 int ret = 0;
935 929
936 wl1271_debug(DEBUG_MAC80211, "mac80211 start"); 930 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
931 vif->type, vif->addr);
937 932
938 mutex_lock(&wl->mutex); 933 mutex_lock(&wl->mutex);
934 if (wl->vif) {
935 ret = -EBUSY;
936 goto out;
937 }
938
939 wl->vif = vif;
940
941 switch (vif->type) {
942 case NL80211_IFTYPE_STATION:
943 wl->bss_type = BSS_TYPE_STA_BSS;
944 wl->set_bss_type = BSS_TYPE_STA_BSS;
945 break;
946 case NL80211_IFTYPE_ADHOC:
947 wl->bss_type = BSS_TYPE_IBSS;
948 wl->set_bss_type = BSS_TYPE_STA_BSS;
949 break;
950 default:
951 ret = -EOPNOTSUPP;
952 goto out;
953 }
954
955 memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
939 956
940 if (wl->state != WL1271_STATE_OFF) { 957 if (wl->state != WL1271_STATE_OFF) {
941 wl1271_error("cannot start because not in off state: %d", 958 wl1271_error("cannot start because not in off state: %d",
@@ -991,19 +1008,20 @@ out:
991 return ret; 1008 return ret;
992} 1009}
993 1010
994static void wl1271_op_stop(struct ieee80211_hw *hw) 1011static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1012 struct ieee80211_vif *vif)
995{ 1013{
996 struct wl1271 *wl = hw->priv; 1014 struct wl1271 *wl = hw->priv;
997 int i; 1015 int i;
998 1016
999 wl1271_info("down");
1000
1001 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1002
1003 unregister_inetaddr_notifier(&wl1271_dev_notifier); 1017 unregister_inetaddr_notifier(&wl1271_dev_notifier);
1004 list_del(&wl->list);
1005 1018
1006 mutex_lock(&wl->mutex); 1019 mutex_lock(&wl->mutex);
1020 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
1021
1022 wl1271_info("down");
1023
1024 list_del(&wl->list);
1007 1025
1008 WARN_ON(wl->state != WL1271_STATE_ON); 1026 WARN_ON(wl->state != WL1271_STATE_ON);
1009 1027
@@ -1032,6 +1050,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1032 memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1); 1050 memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
1033 wl->ssid_len = 0; 1051 wl->ssid_len = 0;
1034 wl->bss_type = MAX_BSS_TYPE; 1052 wl->bss_type = MAX_BSS_TYPE;
1053 wl->set_bss_type = MAX_BSS_TYPE;
1035 wl->band = IEEE80211_BAND_2GHZ; 1054 wl->band = IEEE80211_BAND_2GHZ;
1036 1055
1037 wl->rx_counter = 0; 1056 wl->rx_counter = 0;
@@ -1041,134 +1060,63 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
1041 wl->tx_results_count = 0; 1060 wl->tx_results_count = 0;
1042 wl->tx_packets_count = 0; 1061 wl->tx_packets_count = 0;
1043 wl->tx_security_last_seq = 0; 1062 wl->tx_security_last_seq = 0;
1044 wl->tx_security_seq_16 = 0; 1063 wl->tx_security_seq = 0;
1045 wl->tx_security_seq_32 = 0;
1046 wl->time_offset = 0; 1064 wl->time_offset = 0;
1047 wl->session_counter = 0; 1065 wl->session_counter = 0;
1048 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 1066 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
1049 wl->sta_rate_set = 0; 1067 wl->sta_rate_set = 0;
1050 wl->flags = 0; 1068 wl->flags = 0;
1069 wl->vif = NULL;
1070 wl->filters = 0;
1051 1071
1052 for (i = 0; i < NUM_TX_QUEUES; i++) 1072 for (i = 0; i < NUM_TX_QUEUES; i++)
1053 wl->tx_blocks_freed[i] = 0; 1073 wl->tx_blocks_freed[i] = 0;
1054 1074
1055 wl1271_debugfs_reset(wl); 1075 wl1271_debugfs_reset(wl);
1056 mutex_unlock(&wl->mutex);
1057}
1058
1059static int wl1271_op_add_interface(struct ieee80211_hw *hw,
1060 struct ieee80211_vif *vif)
1061{
1062 struct wl1271 *wl = hw->priv;
1063 int ret = 0;
1064
1065 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
1066 vif->type, vif->addr);
1067
1068 mutex_lock(&wl->mutex);
1069 if (wl->vif) {
1070 ret = -EBUSY;
1071 goto out;
1072 }
1073
1074 wl->vif = vif;
1075 1076
1076 switch (vif->type) { 1077 kfree(wl->fw_status);
1077 case NL80211_IFTYPE_STATION: 1078 wl->fw_status = NULL;
1078 wl->bss_type = BSS_TYPE_STA_BSS; 1079 kfree(wl->tx_res_if);
1079 break; 1080 wl->tx_res_if = NULL;
1080 case NL80211_IFTYPE_ADHOC: 1081 kfree(wl->target_mem_map);
1081 wl->bss_type = BSS_TYPE_IBSS; 1082 wl->target_mem_map = NULL;
1082 break;
1083 default:
1084 ret = -EOPNOTSUPP;
1085 goto out;
1086 }
1087
1088 /* FIXME: what if conf->mac_addr changes? */
1089 1083
1090out:
1091 mutex_unlock(&wl->mutex); 1084 mutex_unlock(&wl->mutex);
1092 return ret;
1093} 1085}
1094 1086
1095static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 1087static void wl1271_configure_filters(struct wl1271 *wl, unsigned int filters)
1096 struct ieee80211_vif *vif)
1097{ 1088{
1098 struct wl1271 *wl = hw->priv; 1089 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1099 1090 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
1100 mutex_lock(&wl->mutex);
1101 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
1102 wl->vif = NULL;
1103 mutex_unlock(&wl->mutex);
1104}
1105
1106#if 0
1107static int wl1271_op_config_interface(struct ieee80211_hw *hw,
1108 struct ieee80211_vif *vif,
1109 struct ieee80211_if_conf *conf)
1110{
1111 struct wl1271 *wl = hw->priv;
1112 struct sk_buff *beacon;
1113 int ret;
1114
1115 wl1271_debug(DEBUG_MAC80211, "mac80211 config_interface bssid %pM",
1116 conf->bssid);
1117 wl1271_dump_ascii(DEBUG_MAC80211, "ssid: ", conf->ssid,
1118 conf->ssid_len);
1119
1120 mutex_lock(&wl->mutex);
1121
1122 ret = wl1271_ps_elp_wakeup(wl, false);
1123 if (ret < 0)
1124 goto out;
1125 1091
1126 if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) { 1092 /* combine requested filters with current filter config */
1127 wl1271_debug(DEBUG_MAC80211, "bssid changed"); 1093 filters = wl->filters | filters;
1128 1094
1129 memcpy(wl->bssid, conf->bssid, ETH_ALEN); 1095 wl1271_debug(DEBUG_FILTERS, "RX filters set: ");
1130 1096
1131 ret = wl1271_cmd_join(wl); 1097 if (filters & FIF_PROMISC_IN_BSS) {
1132 if (ret < 0) 1098 wl1271_debug(DEBUG_FILTERS, " - FIF_PROMISC_IN_BSS");
1133 goto out_sleep; 1099 wl->rx_config &= ~CFG_UNI_FILTER_EN;
1134 1100 wl->rx_config |= CFG_BSSID_FILTER_EN;
1135 ret = wl1271_cmd_build_null_data(wl);
1136 if (ret < 0)
1137 goto out_sleep;
1138 } 1101 }
1139 1102 if (filters & FIF_BCN_PRBRESP_PROMISC) {
1140 wl->ssid_len = conf->ssid_len; 1103 wl1271_debug(DEBUG_FILTERS, " - FIF_BCN_PRBRESP_PROMISC");
1141 if (wl->ssid_len) 1104 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
1142 memcpy(wl->ssid, conf->ssid, wl->ssid_len); 1105 wl->rx_config &= ~CFG_SSID_FILTER_EN;
1143 1106 }
1144 if (conf->changed & IEEE80211_IFCC_BEACON) { 1107 if (filters & FIF_OTHER_BSS) {
1145 beacon = ieee80211_beacon_get(hw, vif); 1108 wl1271_debug(DEBUG_FILTERS, " - FIF_OTHER_BSS");
1146 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, 1109 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
1147 beacon->data, beacon->len); 1110 }
1148 1111 if (filters & FIF_CONTROL) {
1149 if (ret < 0) { 1112 wl1271_debug(DEBUG_FILTERS, " - FIF_CONTROL");
1150 dev_kfree_skb(beacon); 1113 wl->rx_filter |= CFG_RX_CTL_EN;
1151 goto out_sleep; 1114 }
1152 } 1115 if (filters & FIF_FCSFAIL) {
1153 1116 wl1271_debug(DEBUG_FILTERS, " - FIF_FCSFAIL");
1154 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_PROBE_RESPONSE, 1117 wl->rx_filter |= CFG_RX_FCS_ERROR;
1155 beacon->data, beacon->len);
1156
1157 dev_kfree_skb(beacon);
1158
1159 if (ret < 0)
1160 goto out_sleep;
1161 } 1118 }
1162
1163out_sleep:
1164 wl1271_ps_elp_sleep(wl);
1165
1166out:
1167 mutex_unlock(&wl->mutex);
1168
1169 return ret;
1170} 1119}
1171#endif
1172 1120
1173static int wl1271_join_channel(struct wl1271 *wl, int channel) 1121static int wl1271_join_channel(struct wl1271 *wl, int channel)
1174{ 1122{
@@ -1177,17 +1125,13 @@ static int wl1271_join_channel(struct wl1271 *wl, int channel)
1177 static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde, 1125 static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
1178 0xad, 0xbe, 0xef }; 1126 0xad, 0xbe, 0xef };
1179 1127
1180 /* the dummy join is not required for ad-hoc */
1181 if (wl->bss_type == BSS_TYPE_IBSS)
1182 goto out;
1183
1184 /* disable mac filter, so we hear everything */
1185 wl->rx_config &= ~CFG_BSSID_FILTER_EN;
1186
1187 wl->channel = channel; 1128 wl->channel = channel;
1188 memcpy(wl->bssid, dummy_bssid, ETH_ALEN); 1129 memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
1189 1130
1190 ret = wl1271_cmd_join(wl); 1131 /* pass through frames from all BSS */
1132 wl1271_configure_filters(wl, FIF_OTHER_BSS);
1133
1134 ret = wl1271_cmd_join(wl, wl->set_bss_type);
1191 if (ret < 0) 1135 if (ret < 0)
1192 goto out; 1136 goto out;
1193 1137
@@ -1209,12 +1153,40 @@ static int wl1271_unjoin_channel(struct wl1271 *wl)
1209 clear_bit(WL1271_FLAG_JOINED, &wl->flags); 1153 clear_bit(WL1271_FLAG_JOINED, &wl->flags);
1210 wl->channel = 0; 1154 wl->channel = 0;
1211 memset(wl->bssid, 0, ETH_ALEN); 1155 memset(wl->bssid, 0, ETH_ALEN);
1212 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1156
1157 /* stop filtering packets based on bssid */
1158 wl1271_configure_filters(wl, FIF_OTHER_BSS);
1213 1159
1214out: 1160out:
1215 return ret; 1161 return ret;
1216} 1162}
1217 1163
1164static void wl1271_set_band_rate(struct wl1271 *wl)
1165{
1166 if (wl->band == IEEE80211_BAND_2GHZ)
1167 wl->basic_rate_set = wl->conf.tx.basic_rate;
1168 else
1169 wl->basic_rate_set = wl->conf.tx.basic_rate_5;
1170}
1171
1172static u32 wl1271_min_rate_get(struct wl1271 *wl)
1173{
1174 int i;
1175 u32 rate = 0;
1176
1177 if (!wl->basic_rate_set) {
1178 WARN_ON(1);
1179 wl->basic_rate_set = wl->conf.tx.basic_rate;
1180 }
1181
1182 for (i = 0; !rate; i++) {
1183 if ((wl->basic_rate_set >> i) & 0x1)
1184 rate = 1 << i;
1185 }
1186
1187 return rate;
1188}
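Note: the loop in wl1271_min_rate_get() isolates the lowest set bit of
basic_rate_set. Two equivalent branch-free forms, assuming a non-zero mask
as the WARN_ON path guarantees (illustrative, not part of the patch):

	/* two's-complement trick: x & -x keeps only the lowest set bit */
	u32 rate = wl->basic_rate_set & -wl->basic_rate_set;

	/* or, via <linux/bitops.h> */
	u32 rate2 = 1 << __ffs(wl->basic_rate_set);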
1189
1218static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed) 1190static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1219{ 1191{
1220 struct wl1271 *wl = hw->priv; 1192 struct wl1271 *wl = hw->priv;
@@ -1231,12 +1203,41 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1231 1203
1232 mutex_lock(&wl->mutex); 1204 mutex_lock(&wl->mutex);
1233 1205
1234 wl->band = conf->channel->band; 1206 if (unlikely(wl->state == WL1271_STATE_OFF))
1207 goto out;
1235 1208
1236 ret = wl1271_ps_elp_wakeup(wl, false); 1209 ret = wl1271_ps_elp_wakeup(wl, false);
1237 if (ret < 0) 1210 if (ret < 0)
1238 goto out; 1211 goto out;
1239 1212
1213 /* if the channel changes while joined, join again */
1214 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
1215 wl->band = conf->channel->band;
1216 wl->channel = channel;
1217
1218 /*
1219 * FIXME: the mac80211 should really provide a fixed rate
1220 * to use here. for now, just use the smallest possible rate
1221 * for the band as a fixed rate for association frames and
1222 * other control messages.
1223 */
1224 if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
1225 wl1271_set_band_rate(wl);
1226
1227 wl->basic_rate = wl1271_min_rate_get(wl);
1228 ret = wl1271_acx_rate_policies(wl);
1229 if (ret < 0)
1230 wl1271_warning("rate policy for update channel "
1231 "failed %d", ret);
1232
1233 if (test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
1234 ret = wl1271_cmd_join(wl, wl->set_bss_type);
1235 if (ret < 0)
1236 wl1271_warning("cmd join to update channel "
1237 "failed %d", ret);
1238 }
1239 }
1240
1240 if (changed & IEEE80211_CONF_CHANGE_IDLE) { 1241 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
1241 if (conf->flags & IEEE80211_CONF_IDLE && 1242 if (conf->flags & IEEE80211_CONF_IDLE &&
1242 test_bit(WL1271_FLAG_JOINED, &wl->flags)) 1243 test_bit(WL1271_FLAG_JOINED, &wl->flags))
@@ -1245,24 +1246,17 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1245 wl1271_join_channel(wl, channel); 1246 wl1271_join_channel(wl, channel);
1246 1247
1247 if (conf->flags & IEEE80211_CONF_IDLE) { 1248 if (conf->flags & IEEE80211_CONF_IDLE) {
1248 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 1249 wl->rate_set = wl1271_min_rate_get(wl);
1249 wl->sta_rate_set = 0; 1250 wl->sta_rate_set = 0;
1250 wl1271_acx_rate_policies(wl); 1251 wl1271_acx_rate_policies(wl);
1251 } 1252 wl1271_acx_keep_alive_config(
1253 wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
1254 ACX_KEEP_ALIVE_TPL_INVALID);
1255 set_bit(WL1271_FLAG_IDLE, &wl->flags);
1256 } else
1257 clear_bit(WL1271_FLAG_IDLE, &wl->flags);
1252 } 1258 }
1253 1259
1254 /* if the channel changes while joined, join again */
1255 if (channel != wl->channel &&
1256 test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
1257 wl->channel = channel;
1258 /* FIXME: maybe use CMD_CHANNEL_SWITCH for this? */
1259 ret = wl1271_cmd_join(wl);
1260 if (ret < 0)
1261 wl1271_warning("cmd join to update channel failed %d",
1262 ret);
1263 } else
1264 wl->channel = channel;
1265
1266 if (conf->flags & IEEE80211_CONF_PS && 1260 if (conf->flags & IEEE80211_CONF_PS &&
1267 !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { 1261 !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1268 set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); 1262 set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
@@ -1273,13 +1267,13 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1273 * through the bss_info_changed() hook. 1267 * through the bss_info_changed() hook.
1274 */ 1268 */
1275 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) { 1269 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
1276 wl1271_info("psm enabled"); 1270 wl1271_debug(DEBUG_PSM, "psm enabled");
1277 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE, 1271 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
1278 true); 1272 true);
1279 } 1273 }
1280 } else if (!(conf->flags & IEEE80211_CONF_PS) && 1274 } else if (!(conf->flags & IEEE80211_CONF_PS) &&
1281 test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) { 1275 test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
1282 wl1271_info("psm disabled"); 1276 wl1271_debug(DEBUG_PSM, "psm disabled");
1283 1277
1284 clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags); 1278 clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
1285 1279
@@ -1311,12 +1305,17 @@ struct wl1271_filter_params {
1311 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN]; 1305 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
1312}; 1306};
1313 1307
1314static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count, 1308static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
1315 struct dev_addr_list *mc_list) 1309 struct netdev_hw_addr_list *mc_list)
1316{ 1310{
1317 struct wl1271_filter_params *fp; 1311 struct wl1271_filter_params *fp;
1312 struct netdev_hw_addr *ha;
1313 struct wl1271 *wl = hw->priv;
1318 int i; 1314 int i;
1319 1315
1316 if (unlikely(wl->state == WL1271_STATE_OFF))
1317 return 0;
1318
1320 fp = kzalloc(sizeof(*fp), GFP_ATOMIC); 1319 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
1321 if (!fp) { 1320 if (!fp) {
1322 wl1271_error("Out of memory setting filters."); 1321 wl1271_error("Out of memory setting filters.");
@@ -1324,21 +1323,16 @@ static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
1324 } 1323 }
1325 1324
1326 /* update multicast filtering parameters */ 1325 /* update multicast filtering parameters */
1327 fp->enabled = true;
1328 if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
1329 mc_count = 0;
1330 fp->enabled = false;
1331 }
1332
1333 fp->mc_list_length = 0; 1326 fp->mc_list_length = 0;
1334 for (i = 0; i < mc_count; i++) { 1327 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
1335 if (mc_list->da_addrlen == ETH_ALEN) { 1328 fp->enabled = false;
1329 } else {
1330 fp->enabled = true;
1331 netdev_hw_addr_list_for_each(ha, mc_list) {
1336 memcpy(fp->mc_list[fp->mc_list_length], 1332 memcpy(fp->mc_list[fp->mc_list_length],
1337 mc_list->da_addr, ETH_ALEN); 1333 ha->addr, ETH_ALEN);
1338 fp->mc_list_length++; 1334 fp->mc_list_length++;
1339 } else 1335 }
1340 wl1271_warning("Unknown mc address length.");
1341 mc_list = mc_list->next;
1342 } 1336 }
1343 1337
1344 return (u64)(unsigned long)fp; 1338 return (u64)(unsigned long)fp;
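
The rewritten walk above relies on the then-new netdev_hw_addr_list helpers.
A standalone sketch of the same pattern (illustrative only; 'table' is a
hypothetical destination array):

	/* bound-check the list, then copy each address out of it */
	struct netdev_hw_addr *ha;
	int n = 0;

	if (netdev_hw_addr_list_count(mc_list) <= ACX_MC_ADDRESS_GROUP_MAX)
		netdev_hw_addr_list_for_each(ha, mc_list)
			memcpy(table[n++], ha->addr, ETH_ALEN);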
@@ -1363,15 +1357,16 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
1363 1357
1364 mutex_lock(&wl->mutex); 1358 mutex_lock(&wl->mutex);
1365 1359
1366 if (wl->state == WL1271_STATE_OFF) 1360 *total &= WL1271_SUPPORTED_FILTERS;
1361 changed &= WL1271_SUPPORTED_FILTERS;
1362
1363 if (unlikely(wl->state == WL1271_STATE_OFF))
1367 goto out; 1364 goto out;
1368 1365
1369 ret = wl1271_ps_elp_wakeup(wl, false); 1366 ret = wl1271_ps_elp_wakeup(wl, false);
1370 if (ret < 0) 1367 if (ret < 0)
1371 goto out; 1368 goto out;
1372 1369
1373 *total &= WL1271_SUPPORTED_FILTERS;
1374 changed &= WL1271_SUPPORTED_FILTERS;
1375 1370
1376 if (*total & FIF_ALLMULTI) 1371 if (*total & FIF_ALLMULTI)
1377 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); 1372 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
@@ -1382,14 +1377,14 @@ static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
1382 if (ret < 0) 1377 if (ret < 0)
1383 goto out_sleep; 1378 goto out_sleep;
1384 1379
1385 kfree(fp);
1386
1387 /* FIXME: We still need to set our filters properly */
1388
1389 /* determine whether supported filter values have changed */ 1380 /* determine whether supported filter values have changed */
1390 if (changed == 0) 1381 if (changed == 0)
1391 goto out_sleep; 1382 goto out_sleep;
1392 1383
1384 /* configure filters */
1385 wl->filters = *total;
1386 wl1271_configure_filters(wl, 0);
1387
1393 /* apply configured filters */ 1388 /* apply configured filters */
1394 ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter); 1389 ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
1395 if (ret < 0) 1390 if (ret < 0)
@@ -1400,6 +1395,7 @@ out_sleep:
1400 1395
1401out: 1396out:
1402 mutex_unlock(&wl->mutex); 1397 mutex_unlock(&wl->mutex);
1398 kfree(fp);
1403} 1399}
1404 1400
1405static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 1401static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -1450,15 +1446,15 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1450 key_type = KEY_TKIP; 1446 key_type = KEY_TKIP;
1451 1447
1452 key_conf->hw_key_idx = key_conf->keyidx; 1448 key_conf->hw_key_idx = key_conf->keyidx;
1453 tx_seq_32 = wl->tx_security_seq_32; 1449 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
1454 tx_seq_16 = wl->tx_security_seq_16; 1450 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
1455 break; 1451 break;
1456 case ALG_CCMP: 1452 case ALG_CCMP:
1457 key_type = KEY_AES; 1453 key_type = KEY_AES;
1458 1454
1459 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1455 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1460 tx_seq_32 = wl->tx_security_seq_32; 1456 tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
1461 tx_seq_16 = wl->tx_security_seq_16; 1457 tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
1462 break; 1458 break;
1463 default: 1459 default:
1464 wl1271_error("Unknown key algo 0x%x", key_conf->alg); 1460 wl1271_error("Unknown key algo 0x%x", key_conf->alg);
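
The WL1271_TX_SECURITY_HI32()/LO16() helpers used above are not part of this
hunk; presumably they split the driver's 64-bit TX security counter into the
sizes the firmware expects. A guess at their shape (hypothetical definitions,
see wl1271.h for the real ones):

	#define WL1271_TX_SECURITY_LO16(x)  ((u16)((x) & 0xffff))
	#define WL1271_TX_SECURITY_HI32(x)  ((u32)(((x) >> 16) & 0xffffffff))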
@@ -1545,10 +1541,12 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
1545 goto out; 1541 goto out;
1546 1542
1547 if (wl1271_11a_enabled()) 1543 if (wl1271_11a_enabled())
1548 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0, 1544 ret = wl1271_cmd_scan(hw->priv, ssid, len,
1545 req->ie, req->ie_len, 1, 0,
1549 WL1271_SCAN_BAND_DUAL, 3); 1546 WL1271_SCAN_BAND_DUAL, 3);
1550 else 1547 else
1551 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0, 1548 ret = wl1271_cmd_scan(hw->priv, ssid, len,
1549 req->ie, req->ie_len, 1, 0,
1552 WL1271_SCAN_BAND_2_4_GHZ, 3); 1550 WL1271_SCAN_BAND_2_4_GHZ, 3);
1553 1551
1554 wl1271_ps_elp_sleep(wl); 1552 wl1271_ps_elp_sleep(wl);
@@ -1562,10 +1560,13 @@ out:
1562static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 1560static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1563{ 1561{
1564 struct wl1271 *wl = hw->priv; 1562 struct wl1271 *wl = hw->priv;
1565 int ret; 1563 int ret = 0;
1566 1564
1567 mutex_lock(&wl->mutex); 1565 mutex_lock(&wl->mutex);
1568 1566
1567 if (unlikely(wl->state == WL1271_STATE_OFF))
1568 goto out;
1569
1569 ret = wl1271_ps_elp_wakeup(wl, false); 1570 ret = wl1271_ps_elp_wakeup(wl, false);
1570 if (ret < 0) 1571 if (ret < 0)
1571 goto out; 1572 goto out;
@@ -1607,6 +1608,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1607 enum wl1271_cmd_ps_mode mode; 1608 enum wl1271_cmd_ps_mode mode;
1608 struct wl1271 *wl = hw->priv; 1609 struct wl1271 *wl = hw->priv;
1609 bool do_join = false; 1610 bool do_join = false;
1611 bool do_keepalive = false;
1610 int ret; 1612 int ret;
1611 1613
1612 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed"); 1614 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1617,20 +1619,29 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1617 if (ret < 0) 1619 if (ret < 0)
1618 goto out; 1620 goto out;
1619 1621
1620 if (wl->bss_type == BSS_TYPE_IBSS) { 1622 if ((changed & BSS_CHANGED_BEACON_INT) &&
1621 /* FIXME: This implements rudimentary ad-hoc support - 1623 (wl->bss_type == BSS_TYPE_IBSS)) {
1622 proper templates are on the wish list and notification 1624 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon interval updated: %d",
1623 on when they change. This patch will update the templates 1625 bss_conf->beacon_int);
1624 on every call to this function. */ 1626
1627 wl->beacon_int = bss_conf->beacon_int;
1628 do_join = true;
1629 }
1630
1631 if ((changed & BSS_CHANGED_BEACON) &&
1632 (wl->bss_type == BSS_TYPE_IBSS)) {
1625 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif); 1633 struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
1626 1634
1635 wl1271_debug(DEBUG_ADHOC, "ad-hoc beacon updated");
1636
1627 if (beacon) { 1637 if (beacon) {
1628 struct ieee80211_hdr *hdr; 1638 struct ieee80211_hdr *hdr;
1629 1639
1630 wl1271_ssid_set(wl, beacon); 1640 wl1271_ssid_set(wl, beacon);
1631 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, 1641 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
1632 beacon->data, 1642 beacon->data,
1633 beacon->len); 1643 beacon->len, 0,
1644 wl1271_min_rate_get(wl));
1634 1645
1635 if (ret < 0) { 1646 if (ret < 0) {
1636 dev_kfree_skb(beacon); 1647 dev_kfree_skb(beacon);
@@ -1645,7 +1656,8 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1645 ret = wl1271_cmd_template_set(wl, 1656 ret = wl1271_cmd_template_set(wl,
1646 CMD_TEMPL_PROBE_RESPONSE, 1657 CMD_TEMPL_PROBE_RESPONSE,
1647 beacon->data, 1658 beacon->data,
1648 beacon->len); 1659 beacon->len, 0,
1660 wl1271_min_rate_get(wl));
1649 dev_kfree_skb(beacon); 1661 dev_kfree_skb(beacon);
1650 if (ret < 0) 1662 if (ret < 0)
1651 goto out_sleep; 1663 goto out_sleep;
@@ -1655,20 +1667,44 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1655 } 1667 }
1656 } 1668 }
1657 1669
1670 if ((changed & BSS_CHANGED_BEACON_ENABLED) &&
1671 (wl->bss_type == BSS_TYPE_IBSS)) {
1672 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
1673 bss_conf->enable_beacon ? "enabled" : "disabled");
1674
1675 if (bss_conf->enable_beacon)
1676 wl->set_bss_type = BSS_TYPE_IBSS;
1677 else
1678 wl->set_bss_type = BSS_TYPE_STA_BSS;
1679 do_join = true;
1680 }
1681
1682 if (changed & BSS_CHANGED_CQM) {
1683 bool enable = false;
1684 if (bss_conf->cqm_rssi_thold)
1685 enable = true;
1686 ret = wl1271_acx_rssi_snr_trigger(wl, enable,
1687 bss_conf->cqm_rssi_thold,
1688 bss_conf->cqm_rssi_hyst);
1689 if (ret < 0)
1690 goto out_sleep;
1691 wl->rssi_thold = bss_conf->cqm_rssi_thold;
1692 }
1693
1658 if ((changed & BSS_CHANGED_BSSID) && 1694 if ((changed & BSS_CHANGED_BSSID) &&
1659 /* 1695 /*
1660 * Now we know the correct bssid, so we send a new join command 1696 * Now we know the correct bssid, so we send a new join command
1661 * and enable the BSSID filter 1697 * and enable the BSSID filter
1662 */ 1698 */
1663 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) { 1699 memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
1664 wl->rx_config |= CFG_BSSID_FILTER_EN;
1665 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN); 1700 memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
1701
1666 ret = wl1271_cmd_build_null_data(wl); 1702 ret = wl1271_cmd_build_null_data(wl);
1667 if (ret < 0) { 1703 if (ret < 0)
1668 wl1271_warning("cmd buld null data failed %d",
1669 ret);
1670 goto out_sleep; 1704 goto out_sleep;
1671 } 1705
1706 /* filter out all packets not from this BSSID */
1707 wl1271_configure_filters(wl, 0);
1672 1708
1673 /* Need to update the BSSID (for filtering etc) */ 1709 /* Need to update the BSSID (for filtering etc) */
1674 do_join = true; 1710 do_join = true;
@@ -1676,10 +1712,23 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1676 1712
1677 if (changed & BSS_CHANGED_ASSOC) { 1713 if (changed & BSS_CHANGED_ASSOC) {
1678 if (bss_conf->assoc) { 1714 if (bss_conf->assoc) {
1715 u32 rates;
1679 wl->aid = bss_conf->aid; 1716 wl->aid = bss_conf->aid;
1680 set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); 1717 set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1681 1718
1682 /* 1719 /*
1720 * use basic rates from AP, and determine lowest rate
1721 * to use with control frames.
1722 */
1723 rates = bss_conf->basic_rates;
1724 wl->basic_rate_set = wl1271_tx_enabled_rates_get(wl,
1725 rates);
1726 wl->basic_rate = wl1271_min_rate_get(wl);
1727 ret = wl1271_acx_rate_policies(wl);
1728 if (ret < 0)
1729 goto out_sleep;
1730
1731 /*
1683 * with wl1271, we don't need to update the 1732 * with wl1271, we don't need to update the
1684 * beacon_int and dtim_period, because the firmware 1733 * beacon_int and dtim_period, because the firmware
1685 * updates it by itself when the first beacon is 1734 * updates it by itself when the first beacon is
@@ -1689,7 +1738,30 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1689 if (ret < 0) 1738 if (ret < 0)
1690 goto out_sleep; 1739 goto out_sleep;
1691 1740
1692 ret = wl1271_acx_aid(wl, wl->aid); 1741 /*
1742 * The SSID is intentionally set to NULL here - the
1743 * firmware will set the probe request with a
1744 * broadcast SSID regardless of what we set in the
1745 * template.
1746 */
1747 ret = wl1271_cmd_build_probe_req(wl, NULL, 0, NULL, 0, wl->band);
1748 if (ret < 0)
1749 goto out_sleep;
1750 /* Enable the keep-alive feature */
1751 ret = wl1271_acx_keep_alive_mode(wl, true);
1752 if (ret < 0)
1753 goto out_sleep;
1754
1755 /*
1756 * This is awkward. The keep-alive configs must be done
1757 * *after* the join command, because otherwise it will
1758 * not work, but it must only be done *once* because
1759 * otherwise the firmware will start complaining.
1760 */
1761 do_keepalive = true;
1762
1763 /* enable the connection monitoring feature */
1764 ret = wl1271_acx_conn_monit_params(wl, true);
1693 if (ret < 0) 1765 if (ret < 0)
1694 goto out_sleep; 1766 goto out_sleep;
1695 1767
@@ -1705,6 +1777,22 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1705 /* use defaults when not associated */ 1777 /* use defaults when not associated */
1706 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); 1778 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1707 wl->aid = 0; 1779 wl->aid = 0;
1780
1781 /* revert back to minimum rates for the current band */
1782 wl1271_set_band_rate(wl);
1783 wl->basic_rate = wl1271_min_rate_get(wl);
1784 ret = wl1271_acx_rate_policies(wl);
1785 if (ret < 0)
1786 goto out_sleep;
1787
1788 /* disable connection monitor features */
1789 ret = wl1271_acx_conn_monit_params(wl, false);
1790 if (ret < 0)
1791 goto out_sleep;
1792 /* Disable the keep-alive feature */
1793 ret = wl1271_acx_keep_alive_mode(wl, false);
1794 if (ret < 0)
1795 goto out_sleep;
1708 } 1796 }
1709 1797
1710 } 1798 }
@@ -1739,7 +1827,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1739 } 1827 }
1740 1828
1741 if (do_join) { 1829 if (do_join) {
1742 ret = wl1271_cmd_join(wl); 1830 ret = wl1271_cmd_join(wl, wl->set_bss_type);
1743 if (ret < 0) { 1831 if (ret < 0) {
1744 wl1271_warning("cmd join failed %d", ret); 1832 wl1271_warning("cmd join failed %d", ret);
1745 goto out_sleep; 1833 goto out_sleep;
@@ -1747,6 +1835,29 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1747 set_bit(WL1271_FLAG_JOINED, &wl->flags); 1835 set_bit(WL1271_FLAG_JOINED, &wl->flags);
1748 } 1836 }
1749 1837
1838 /*
1839 * As a side effect, the JOIN operation shuts down the firmware
1840 * keep-alive, and ACX_AID restarts it.
1841 * Hence, for non-IBSS, the ACX_AID must always happen *after* the
1842 * JOIN operation, and the template config after the ACX_AID.
1843 */
1844 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
1845 ret = wl1271_acx_aid(wl, wl->aid);
1846 if (ret < 0)
1847 goto out_sleep;
1848 }
1849
1850 if (do_keepalive) {
1851 ret = wl1271_cmd_build_klv_null_data(wl);
1852 if (ret < 0)
1853 goto out_sleep;
1854 ret = wl1271_acx_keep_alive_config(
1855 wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
1856 ACX_KEEP_ALIVE_TPL_VALID);
1857 if (ret < 0)
1858 goto out_sleep;
1859 }
1860
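
Condensed, the ordering the comments above mandate for the STA path looks
like this (happy-path sketch, error handling omitted):

	/* JOIN stops the fw keep-alive, ACX_AID restarts it, then the
	 * keep-alive template is configured exactly once */
	wl1271_cmd_join(wl, wl->set_bss_type);
	wl1271_acx_aid(wl, wl->aid);
	wl1271_cmd_build_klv_null_data(wl);
	wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
				     ACX_KEEP_ALIVE_TPL_VALID);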
1750out_sleep: 1861out_sleep:
1751 wl1271_ps_elp_sleep(wl); 1862 wl1271_ps_elp_sleep(wl);
1752 1863
@@ -1758,6 +1869,7 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1758 const struct ieee80211_tx_queue_params *params) 1869 const struct ieee80211_tx_queue_params *params)
1759{ 1870{
1760 struct wl1271 *wl = hw->priv; 1871 struct wl1271 *wl = hw->priv;
1872 u8 ps_scheme;
1761 int ret; 1873 int ret;
1762 1874
1763 mutex_lock(&wl->mutex); 1875 mutex_lock(&wl->mutex);
@@ -1768,17 +1880,22 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1768 if (ret < 0) 1880 if (ret < 0)
1769 goto out; 1881 goto out;
1770 1882
1883 /* mac80211 gives the txop in units of 32us; the firmware expects microseconds */
1771 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue), 1884 ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
1772 params->cw_min, params->cw_max, 1885 params->cw_min, params->cw_max,
1773 params->aifs, params->txop); 1886 params->aifs, params->txop << 5);
1774 if (ret < 0) 1887 if (ret < 0)
1775 goto out_sleep; 1888 goto out_sleep;
1776 1889
1890 if (params->uapsd)
1891 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
1892 else
1893 ps_scheme = CONF_PS_SCHEME_LEGACY;
1894
1777 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue), 1895 ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
1778 CONF_CHANNEL_TYPE_EDCF, 1896 CONF_CHANNEL_TYPE_EDCF,
1779 wl1271_tx_get_queue(queue), 1897 wl1271_tx_get_queue(queue),
1780 CONF_PS_SCHEME_LEGACY_PSPOLL, 1898 ps_scheme, CONF_ACK_POLICY_LEGACY, 0, 0);
1781 CONF_ACK_POLICY_LEGACY, 0, 0);
1782 if (ret < 0) 1899 if (ret < 0)
1783 goto out_sleep; 1900 goto out_sleep;
1784 1901
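
To make the unit conversion above concrete (worked example, value invented):
mac80211 hands over the txop in 32us units, so the left shift by five
multiplies by 32:

	u16 txop = 94;             /* from mac80211, in 32us units */
	u32 txop_us = txop << 5;   /* 94 * 32 = 3008 microseconds */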
@@ -1852,6 +1969,36 @@ static struct ieee80211_channel wl1271_channels[] = {
1852 { .hw_value = 13, .center_freq = 2472, .max_power = 25 }, 1969 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
1853}; 1970};
1854 1971
1972/* mapping to indexes for wl1271_rates */
1973static const u8 wl1271_rate_to_idx_2ghz[] = {
1974 /* MCS rates are used only with 11n */
1975 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
1976 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
1977 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
1978 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
1979 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
1980 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
1981 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
1982 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
1983
1984 11, /* CONF_HW_RXTX_RATE_54 */
1985 10, /* CONF_HW_RXTX_RATE_48 */
1986 9, /* CONF_HW_RXTX_RATE_36 */
1987 8, /* CONF_HW_RXTX_RATE_24 */
1988
1989 /* TI-specific rate */
1990 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
1991
1992 7, /* CONF_HW_RXTX_RATE_18 */
1993 6, /* CONF_HW_RXTX_RATE_12 */
1994 3, /* CONF_HW_RXTX_RATE_11 */
1995 5, /* CONF_HW_RXTX_RATE_9 */
1996 4, /* CONF_HW_RXTX_RATE_6 */
1997 2, /* CONF_HW_RXTX_RATE_5_5 */
1998 1, /* CONF_HW_RXTX_RATE_2 */
1999 0 /* CONF_HW_RXTX_RATE_1 */
2000};
2001
1855/* can't be const, mac80211 writes to this */ 2002/* can't be const, mac80211 writes to this */
1856static struct ieee80211_supported_band wl1271_band_2ghz = { 2003static struct ieee80211_supported_band wl1271_band_2ghz = {
1857 .channels = wl1271_channels, 2004 .channels = wl1271_channels,
@@ -1934,6 +2081,35 @@ static struct ieee80211_channel wl1271_channels_5ghz[] = {
1934 { .hw_value = 165, .center_freq = 5825}, 2081 { .hw_value = 165, .center_freq = 5825},
1935}; 2082};
1936 2083
2084/* mapping to indexes for wl1271_rates_5ghz */
2085static const u8 wl1271_rate_to_idx_5ghz[] = {
2086 /* MCS rates are used only with 11n */
2087 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */
2088 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */
2089 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */
2090 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */
2091 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */
2092 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */
2093 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */
2094 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */
2095
2096 7, /* CONF_HW_RXTX_RATE_54 */
2097 6, /* CONF_HW_RXTX_RATE_48 */
2098 5, /* CONF_HW_RXTX_RATE_36 */
2099 4, /* CONF_HW_RXTX_RATE_24 */
2100
2101 /* TI-specific rate */
2102 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_22 */
2103
2104 3, /* CONF_HW_RXTX_RATE_18 */
2105 2, /* CONF_HW_RXTX_RATE_12 */
2106 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_11 */
2107 1, /* CONF_HW_RXTX_RATE_9 */
2108 0, /* CONF_HW_RXTX_RATE_6 */
2109 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_5_5 */
2110 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_2 */
2111 CONF_HW_RXTX_RATE_UNSUPPORTED /* CONF_HW_RXTX_RATE_1 */
2112};
1937 2113
1938static struct ieee80211_supported_band wl1271_band_5ghz = { 2114static struct ieee80211_supported_band wl1271_band_5ghz = {
1939 .channels = wl1271_channels_5ghz, 2115 .channels = wl1271_channels_5ghz,
@@ -1942,13 +2118,17 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
1942 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), 2118 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
1943}; 2119};
1944 2120
2121static const u8 *wl1271_band_rate_to_idx[] = {
2122 [IEEE80211_BAND_2GHZ] = wl1271_rate_to_idx_2ghz,
2123 [IEEE80211_BAND_5GHZ] = wl1271_rate_to_idx_5ghz
2124};
2125
1945static const struct ieee80211_ops wl1271_ops = { 2126static const struct ieee80211_ops wl1271_ops = {
1946 .start = wl1271_op_start, 2127 .start = wl1271_op_start,
1947 .stop = wl1271_op_stop, 2128 .stop = wl1271_op_stop,
1948 .add_interface = wl1271_op_add_interface, 2129 .add_interface = wl1271_op_add_interface,
1949 .remove_interface = wl1271_op_remove_interface, 2130 .remove_interface = wl1271_op_remove_interface,
1950 .config = wl1271_op_config, 2131 .config = wl1271_op_config,
1951/* .config_interface = wl1271_op_config_interface, */
1952 .prepare_multicast = wl1271_op_prepare_multicast, 2132 .prepare_multicast = wl1271_op_prepare_multicast,
1953 .configure_filter = wl1271_op_configure_filter, 2133 .configure_filter = wl1271_op_configure_filter,
1954 .tx = wl1271_op_tx, 2134 .tx = wl1271_op_tx,
@@ -1960,7 +2140,90 @@ static const struct ieee80211_ops wl1271_ops = {
1960 CFG80211_TESTMODE_CMD(wl1271_tm_cmd) 2140 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
1961}; 2141};
1962 2142
1963static int wl1271_register_hw(struct wl1271 *wl) 2143
2144u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate)
2145{
2146 u8 idx;
2147
2148 BUG_ON(wl->band >= ARRAY_SIZE(wl1271_band_rate_to_idx));
2149
2150 if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
2151 wl1271_error("Illegal RX rate from HW: %d", rate);
2152 return 0;
2153 }
2154
2155 idx = wl1271_band_rate_to_idx[wl->band][rate];
2156 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
2157 wl1271_error("Unsupported RX rate from HW: %d", rate);
2158 return 0;
2159 }
2160
2161 return idx;
2162}
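
A usage sketch for the lookup above (assuming, as the table comments suggest,
that the CONF_HW_RXTX_RATE_* values match the table positions):

	/* HW rate code -> mac80211 bitrate index on the 2.4 GHz band */
	u8 idx = wl1271_band_rate_to_idx[IEEE80211_BAND_2GHZ]
					[CONF_HW_RXTX_RATE_54];  /* 11 */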
2163
2164static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
2165 struct device_attribute *attr,
2166 char *buf)
2167{
2168 struct wl1271 *wl = dev_get_drvdata(dev);
2169 ssize_t len;
2170
2171 /* FIXME: what's the maximum length of buf? page size? */
2172 len = 500;
2173
2174 mutex_lock(&wl->mutex);
2175 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
2176 wl->sg_enabled);
2177 mutex_unlock(&wl->mutex);
2178
2179 return len;
2180
2181}
2182
2183static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
2184 struct device_attribute *attr,
2185 const char *buf, size_t count)
2186{
2187 struct wl1271 *wl = dev_get_drvdata(dev);
2188 unsigned long res;
2189 int ret;
2190
2191 ret = strict_strtoul(buf, 10, &res);
2192
2193 if (ret < 0) {
2194 wl1271_warning("incorrect value written to bt_coex_mode");
2195 return count;
2196 }
2197
2198 mutex_lock(&wl->mutex);
2199
2200 res = !!res;
2201
2202 if (res == wl->sg_enabled)
2203 goto out;
2204
2205 wl->sg_enabled = res;
2206
2207 if (wl->state == WL1271_STATE_OFF)
2208 goto out;
2209
2210 ret = wl1271_ps_elp_wakeup(wl, false);
2211 if (ret < 0)
2212 goto out;
2213
2214 wl1271_acx_sg_enable(wl, wl->sg_enabled);
2215 wl1271_ps_elp_sleep(wl);
2216
2217 out:
2218 mutex_unlock(&wl->mutex);
2219 return count;
2220}
2221
2222static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
2223 wl1271_sysfs_show_bt_coex_state,
2224 wl1271_sysfs_store_bt_coex_state);
2225
2226int wl1271_register_hw(struct wl1271 *wl)
1964{ 2227{
1965 int ret; 2228 int ret;
1966 2229
@@ -1981,8 +2244,17 @@ static int wl1271_register_hw(struct wl1271 *wl)
1981 2244
1982 return 0; 2245 return 0;
1983} 2246}
2247EXPORT_SYMBOL_GPL(wl1271_register_hw);
1984 2248
1985static int wl1271_init_ieee80211(struct wl1271 *wl) 2249void wl1271_unregister_hw(struct wl1271 *wl)
2250{
2251 ieee80211_unregister_hw(wl->hw);
2252 wl->mac80211_registered = false;
2253
2254}
2255EXPORT_SYMBOL_GPL(wl1271_unregister_hw);
2256
2257int wl1271_init_ieee80211(struct wl1271 *wl)
1986{ 2258{
1987 /* The tx descriptor buffer and the TKIP space. */ 2259 /* The tx descriptor buffer and the TKIP space. */
1988 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE + 2260 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
@@ -1991,11 +2263,16 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
1991 /* unit us */ 2263 /* unit us */
1992 /* FIXME: find a proper value */ 2264 /* FIXME: find a proper value */
1993 wl->hw->channel_change_time = 10000; 2265 wl->hw->channel_change_time = 10000;
2266 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
1994 2267
1995 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 2268 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1996 IEEE80211_HW_NOISE_DBM | 2269 IEEE80211_HW_NOISE_DBM |
1997 IEEE80211_HW_BEACON_FILTER | 2270 IEEE80211_HW_BEACON_FILTER |
1998 IEEE80211_HW_SUPPORTS_PS; 2271 IEEE80211_HW_SUPPORTS_PS |
2272 IEEE80211_HW_SUPPORTS_UAPSD |
2273 IEEE80211_HW_HAS_RATE_CONTROL |
2274 IEEE80211_HW_CONNECTION_MONITOR |
2275 IEEE80211_HW_SUPPORTS_CQM_RSSI;
1999 2276
2000 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 2277 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2001 BIT(NL80211_IFTYPE_ADHOC); 2278 BIT(NL80211_IFTYPE_ADHOC);
@@ -2005,51 +2282,53 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
2005 if (wl1271_11a_enabled()) 2282 if (wl1271_11a_enabled())
2006 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz; 2283 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
2007 2284
2008 SET_IEEE80211_DEV(wl->hw, &wl->spi->dev); 2285 wl->hw->queues = 4;
2009 2286 wl->hw->max_rates = 1;
2010 return 0;
2011}
2012 2287
2013static void wl1271_device_release(struct device *dev) 2288 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
2014{
2015 2289
2290 return 0;
2016} 2291}
2017 2292EXPORT_SYMBOL_GPL(wl1271_init_ieee80211);
2018static struct platform_device wl1271_device = {
2019 .name = "wl1271",
2020 .id = -1,
2021
2022 /* device model insists to have a release function */
2023 .dev = {
2024 .release = wl1271_device_release,
2025 },
2026};
2027 2293
2028#define WL1271_DEFAULT_CHANNEL 0 2294#define WL1271_DEFAULT_CHANNEL 0
2029 2295
2030static struct ieee80211_hw *wl1271_alloc_hw(void) 2296struct ieee80211_hw *wl1271_alloc_hw(void)
2031{ 2297{
2032 struct ieee80211_hw *hw; 2298 struct ieee80211_hw *hw;
2299 struct platform_device *plat_dev = NULL;
2033 struct wl1271 *wl; 2300 struct wl1271 *wl;
2034 int i; 2301 int i, ret;
2035 2302
2036 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops); 2303 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
2037 if (!hw) { 2304 if (!hw) {
2038 wl1271_error("could not alloc ieee80211_hw"); 2305 wl1271_error("could not alloc ieee80211_hw");
2039 return ERR_PTR(-ENOMEM); 2306 ret = -ENOMEM;
2307 goto err_hw_alloc;
2040 } 2308 }
2041 2309
2310 plat_dev = kmalloc(sizeof(wl1271_device), GFP_KERNEL);
2311 if (!plat_dev) {
2312 wl1271_error("could not allocate platform_device");
2313 ret = -ENOMEM;
2314 goto err_plat_alloc;
2315 }
2316
2317 memcpy(plat_dev, &wl1271_device, sizeof(wl1271_device));
2318
2042 wl = hw->priv; 2319 wl = hw->priv;
2043 memset(wl, 0, sizeof(*wl)); 2320 memset(wl, 0, sizeof(*wl));
2044 2321
2045 INIT_LIST_HEAD(&wl->list); 2322 INIT_LIST_HEAD(&wl->list);
2046 2323
2047 wl->hw = hw; 2324 wl->hw = hw;
2325 wl->plat_dev = plat_dev;
2048 2326
2049 skb_queue_head_init(&wl->tx_queue); 2327 skb_queue_head_init(&wl->tx_queue);
2050 2328
2051 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 2329 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
2052 wl->channel = WL1271_DEFAULT_CHANNEL; 2330 wl->channel = WL1271_DEFAULT_CHANNEL;
2331 wl->beacon_int = WL1271_DEFAULT_BEACON_INT;
2053 wl->default_key = 0; 2332 wl->default_key = 0;
2054 wl->rx_counter = 0; 2333 wl->rx_counter = 0;
2055 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 2334 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
@@ -2057,11 +2336,13 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
2057 wl->psm_entry_retry = 0; 2336 wl->psm_entry_retry = 0;
2058 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 2337 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
2059 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC; 2338 wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2339 wl->basic_rate = CONF_TX_RATE_MASK_BASIC;
2060 wl->rate_set = CONF_TX_RATE_MASK_BASIC; 2340 wl->rate_set = CONF_TX_RATE_MASK_BASIC;
2061 wl->sta_rate_set = 0; 2341 wl->sta_rate_set = 0;
2062 wl->band = IEEE80211_BAND_2GHZ; 2342 wl->band = IEEE80211_BAND_2GHZ;
2063 wl->vif = NULL; 2343 wl->vif = NULL;
2064 wl->flags = 0; 2344 wl->flags = 0;
2345 wl->sg_enabled = true;
2065 2346
2066 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 2347 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
2067 wl->tx_frames[i] = NULL; 2348 wl->tx_frames[i] = NULL;
@@ -2074,167 +2355,62 @@ static struct ieee80211_hw *wl1271_alloc_hw(void)
2074 /* Apply default driver configuration. */ 2355 /* Apply default driver configuration. */
2075 wl1271_conf_init(wl); 2356 wl1271_conf_init(wl);
2076 2357
2077 return hw; 2358 wl1271_debugfs_init(wl);
2078}
2079
2080int wl1271_free_hw(struct wl1271 *wl)
2081{
2082 ieee80211_unregister_hw(wl->hw);
2083
2084 wl1271_debugfs_exit(wl);
2085
2086 kfree(wl->target_mem_map);
2087 vfree(wl->fw);
2088 wl->fw = NULL;
2089 kfree(wl->nvs);
2090 wl->nvs = NULL;
2091
2092 kfree(wl->fw_status);
2093 kfree(wl->tx_res_if);
2094
2095 ieee80211_free_hw(wl->hw);
2096
2097 return 0;
2098}
2099
2100static int __devinit wl1271_probe(struct spi_device *spi)
2101{
2102 struct wl12xx_platform_data *pdata;
2103 struct ieee80211_hw *hw;
2104 struct wl1271 *wl;
2105 int ret;
2106
2107 pdata = spi->dev.platform_data;
2108 if (!pdata) {
2109 wl1271_error("no platform data");
2110 return -ENODEV;
2111 }
2112
2113 hw = wl1271_alloc_hw();
2114 if (IS_ERR(hw))
2115 return PTR_ERR(hw);
2116
2117 wl = hw->priv;
2118
2119 dev_set_drvdata(&spi->dev, wl);
2120 wl->spi = spi;
2121
2122 /* This is the only SPI value that we need to set here, the rest
2123 * comes from the board-peripherals file */
2124 spi->bits_per_word = 32;
2125
2126 ret = spi_setup(spi);
2127 if (ret < 0) {
2128 wl1271_error("spi_setup failed");
2129 goto out_free;
2130 }
2131
2132 wl->set_power = pdata->set_power;
2133 if (!wl->set_power) {
2134 wl1271_error("set power function missing in platform data");
2135 ret = -ENODEV;
2136 goto out_free;
2137 }
2138
2139 wl->irq = spi->irq;
2140 if (wl->irq < 0) {
2141 wl1271_error("irq missing in platform data");
2142 ret = -ENODEV;
2143 goto out_free;
2144 }
2145
2146 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
2147 if (ret < 0) {
2148 wl1271_error("request_irq() failed: %d", ret);
2149 goto out_free;
2150 }
2151
2152 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
2153
2154 disable_irq(wl->irq);
2155 2359
2156 ret = platform_device_register(&wl1271_device); 2360 /* Register platform device */
2361 ret = platform_device_register(wl->plat_dev);
2157 if (ret) { 2362 if (ret) {
2158 wl1271_error("couldn't register platform device"); 2363 wl1271_error("couldn't register platform device");
2159 goto out_irq; 2364 goto err_hw;
2160 } 2365 }
2161 dev_set_drvdata(&wl1271_device.dev, wl); 2366 dev_set_drvdata(&wl->plat_dev->dev, wl);
2162
2163 ret = wl1271_init_ieee80211(wl);
2164 if (ret)
2165 goto out_platform;
2166 2367
2167 ret = wl1271_register_hw(wl); 2368 /* Create sysfs file to control bt coex state */
2168 if (ret) 2369 ret = device_create_file(&wl->plat_dev->dev, &dev_attr_bt_coex_state);
2169 goto out_platform; 2370 if (ret < 0) {
2170 2371 wl1271_error("failed to create sysfs file bt_coex_state");
2171 wl1271_debugfs_init(wl); 2372 goto err_platform;
2172 2373 }
2173 wl1271_notice("initialized");
2174 2374
2175 return 0; 2375 return hw;
2176 2376
2177 out_platform: 2377err_platform:
2178 platform_device_unregister(&wl1271_device); 2378 platform_device_unregister(wl->plat_dev);
2179 2379
2180 out_irq: 2380err_hw:
2181 free_irq(wl->irq, wl); 2381 wl1271_debugfs_exit(wl);
2382 kfree(plat_dev);
2182 2383
2183 out_free: 2384err_plat_alloc:
2184 ieee80211_free_hw(hw); 2385 ieee80211_free_hw(hw);
2185 2386
2186 return ret; 2387err_hw_alloc:
2187}
2188
2189static int __devexit wl1271_remove(struct spi_device *spi)
2190{
2191 struct wl1271 *wl = dev_get_drvdata(&spi->dev);
2192
2193 platform_device_unregister(&wl1271_device);
2194 free_irq(wl->irq, wl);
2195 2388
2196 wl1271_free_hw(wl); 2389 return ERR_PTR(ret);
2197
2198 return 0;
2199} 2390}
2391EXPORT_SYMBOL_GPL(wl1271_alloc_hw);
2200 2392
2201 2393int wl1271_free_hw(struct wl1271 *wl)
2202static struct spi_driver wl1271_spi_driver = {
2203 .driver = {
2204 .name = "wl1271",
2205 .bus = &spi_bus_type,
2206 .owner = THIS_MODULE,
2207 },
2208
2209 .probe = wl1271_probe,
2210 .remove = __devexit_p(wl1271_remove),
2211};
2212
2213static int __init wl1271_init(void)
2214{ 2394{
2215 int ret; 2395 platform_device_unregister(wl->plat_dev);
2396 kfree(wl->plat_dev);
2216 2397
2217 ret = spi_register_driver(&wl1271_spi_driver); 2398 wl1271_debugfs_exit(wl);
2218 if (ret < 0) {
2219 wl1271_error("failed to register spi driver: %d", ret);
2220 goto out;
2221 }
2222 2399
2223out: 2400 vfree(wl->fw);
2224 return ret; 2401 wl->fw = NULL;
2225} 2402 kfree(wl->nvs);
2403 wl->nvs = NULL;
2226 2404
2227static void __exit wl1271_exit(void) 2405 kfree(wl->fw_status);
2228{ 2406 kfree(wl->tx_res_if);
2229 spi_unregister_driver(&wl1271_spi_driver);
2230 2407
2231 wl1271_notice("unloaded"); 2408 ieee80211_free_hw(wl->hw);
2232}
2233 2409
2234module_init(wl1271_init); 2410 return 0;
2235module_exit(wl1271_exit); 2411}
2412EXPORT_SYMBOL_GPL(wl1271_free_hw);
2236 2413
2237MODULE_LICENSE("GPL"); 2414MODULE_LICENSE("GPL");
2238MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 2415MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
2239MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 2416MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
2240MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index e2b1ebf096e8..a5e60e0403e5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -23,7 +23,6 @@
23 23
24#include "wl1271_reg.h" 24#include "wl1271_reg.h"
25#include "wl1271_ps.h" 25#include "wl1271_ps.h"
26#include "wl1271_spi.h"
27#include "wl1271_io.h" 26#include "wl1271_io.h"
28 27
29#define WL1271_WAKEUP_TIMEOUT 500 28#define WL1271_WAKEUP_TIMEOUT 500
@@ -41,7 +40,8 @@ void wl1271_elp_work(struct work_struct *work)
41 mutex_lock(&wl->mutex); 40 mutex_lock(&wl->mutex);
42 41
43 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) || 42 if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
44 !test_bit(WL1271_FLAG_PSM, &wl->flags)) 43 (!test_bit(WL1271_FLAG_PSM, &wl->flags) &&
44 !test_bit(WL1271_FLAG_IDLE, &wl->flags)))
45 goto out; 45 goto out;
46 46
47 wl1271_debug(DEBUG_PSM, "chip to elp"); 47 wl1271_debug(DEBUG_PSM, "chip to elp");
@@ -57,7 +57,8 @@ out:
57/* Routines to toggle sleep mode while in ELP */ 57/* Routines to toggle sleep mode while in ELP */
58void wl1271_ps_elp_sleep(struct wl1271 *wl) 58void wl1271_ps_elp_sleep(struct wl1271 *wl)
59{ 59{
60 if (test_bit(WL1271_FLAG_PSM, &wl->flags)) { 60 if (test_bit(WL1271_FLAG_PSM, &wl->flags) ||
61 test_bit(WL1271_FLAG_IDLE, &wl->flags)) {
61 cancel_delayed_work(&wl->elp_work); 62 cancel_delayed_work(&wl->elp_work);
62 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, 63 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
63 msecs_to_jiffies(ELP_ENTRY_DELAY)); 64 msecs_to_jiffies(ELP_ENTRY_DELAY));
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index c723d9c7e131..57f4bfd959c8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -27,7 +27,6 @@
27#include "wl1271_acx.h" 27#include "wl1271_acx.h"
28#include "wl1271_reg.h" 28#include "wl1271_reg.h"
29#include "wl1271_rx.h" 29#include "wl1271_rx.h"
30#include "wl1271_spi.h"
31#include "wl1271_io.h" 30#include "wl1271_io.h"
32 31
33static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, 32static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
@@ -44,66 +43,6 @@ static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
44 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV; 43 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
45} 44}
46 45
47/* The values of this table must match the wl1271_rates[] array */
48static u8 wl1271_rx_rate_to_idx[] = {
49 /* MCS rates are used only with 11n */
50 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
51 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
52 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
53 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
54 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
55 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
56 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
57 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
58
59 11, /* WL1271_RATE_54 */
60 10, /* WL1271_RATE_48 */
61 9, /* WL1271_RATE_36 */
62 8, /* WL1271_RATE_24 */
63
64 /* TI-specific rate */
65 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
66
67 7, /* WL1271_RATE_18 */
68 6, /* WL1271_RATE_12 */
69 3, /* WL1271_RATE_11 */
70 5, /* WL1271_RATE_9 */
71 4, /* WL1271_RATE_6 */
72 2, /* WL1271_RATE_5_5 */
73 1, /* WL1271_RATE_2 */
74 0 /* WL1271_RATE_1 */
75};
76
77/* The values of this table must match the wl1271_rates[] array */
78static u8 wl1271_5_ghz_rx_rate_to_idx[] = {
79 /* MCS rates are used only with 11n */
80 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
81 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
82 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
83 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
84 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
85 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
86 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
87 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
88
89 7, /* WL1271_RATE_54 */
90 6, /* WL1271_RATE_48 */
91 5, /* WL1271_RATE_36 */
92 4, /* WL1271_RATE_24 */
93
94 /* TI-specific rate */
95 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
96
97 3, /* WL1271_RATE_18 */
98 2, /* WL1271_RATE_12 */
99 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11 */
100 1, /* WL1271_RATE_9 */
101 0, /* WL1271_RATE_6 */
102 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5 */
103 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2 */
104 WL1271_RX_RATE_UNSUPPORTED /* WL1271_RATE_1 */
105};
106
107static void wl1271_rx_status(struct wl1271 *wl, 46static void wl1271_rx_status(struct wl1271 *wl,
108 struct wl1271_rx_descriptor *desc, 47 struct wl1271_rx_descriptor *desc,
109 struct ieee80211_rx_status *status, 48 struct ieee80211_rx_status *status,
@@ -111,20 +50,8 @@ static void wl1271_rx_status(struct wl1271 *wl,
111{ 50{
112 memset(status, 0, sizeof(struct ieee80211_rx_status)); 51 memset(status, 0, sizeof(struct ieee80211_rx_status));
113 52
114 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == 53 status->band = wl->band;
115 WL1271_RX_DESC_BAND_BG) { 54 status->rate_idx = wl1271_rate_to_idx(wl, desc->rate);
116 status->band = IEEE80211_BAND_2GHZ;
117 status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
118 } else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
119 WL1271_RX_DESC_BAND_A) {
120 status->band = IEEE80211_BAND_5GHZ;
121 status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate];
122 } else
123 wl1271_warning("unsupported band 0x%x",
124 desc->flags & WL1271_RX_DESC_BAND_MASK);
125
126 if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED))
127 wl1271_warning("unsupported rate");
128 55
129 /* 56 /*
130 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the 57 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the
@@ -134,13 +61,6 @@ static void wl1271_rx_status(struct wl1271 *wl,
134 */ 61 */
135 status->signal = desc->rssi; 62 status->signal = desc->rssi;
136 63
137 /*
138 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
139 * need to divide by two for now, but TI has been discussing about
140 * changing it. This needs to be rechecked.
141 */
142 status->noise = desc->rssi - (desc->snr >> 1);
143
144 status->freq = ieee80211_channel_to_frequency(desc->channel); 64 status->freq = ieee80211_channel_to_frequency(desc->channel);
145 65
146 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { 66 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
@@ -162,6 +82,13 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
162 u8 *buf; 82 u8 *buf;
163 u8 beacon = 0; 83 u8 beacon = 0;
164 84
85 /*
86 * In PLT mode we seem to get frames and mac80211 warns about them,
87 * work around this by not retrieving them at all.
88 */
89 if (unlikely(wl->state == WL1271_STATE_PLT))
90 return;
91
165 skb = __dev_alloc_skb(length, GFP_KERNEL); 92 skb = __dev_alloc_skb(length, GFP_KERNEL);
166 if (!skb) { 93 if (!skb) {
167 wl1271_error("Couldn't allocate RX frame"); 94 wl1271_error("Couldn't allocate RX frame");
@@ -220,6 +147,7 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
220 147
221 wl->rx_counter++; 148 wl->rx_counter++;
222 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 149 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
223 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
224 } 150 }
151
152 wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
225} 153}
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index 1ae6d1783ed4..b89be4758e78 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -43,7 +43,6 @@
43#define RX_MAX_PACKET_ID 3 43#define RX_MAX_PACKET_ID 3
44 44
45#define NUM_RX_PKT_DESC_MOD_MASK 7 45#define NUM_RX_PKT_DESC_MOD_MASK 7
46#define WL1271_RX_RATE_UNSUPPORTED 0xFF
47 46
48#define RX_DESC_VALID_FCS 0x0001 47#define RX_DESC_VALID_FCS 0x0001
49#define RX_DESC_MATCH_RXADDR1 0x0002 48#define RX_DESC_MATCH_RXADDR1 0x0002
@@ -117,5 +116,6 @@ struct wl1271_rx_descriptor {
117} __attribute__ ((packed)); 116} __attribute__ ((packed));
118 117
119void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); 118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
119u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
120 120
121#endif 121#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/wl1271_sdio.c
new file mode 100644
index 000000000000..d3d6f302f705
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_sdio.c
@@ -0,0 +1,291 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2009-2010 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/irq.h>
25#include <linux/module.h>
26#include <linux/crc7.h>
27#include <linux/vmalloc.h>
28#include <linux/mmc/sdio_func.h>
29#include <linux/mmc/sdio_ids.h>
30#include <linux/mmc/card.h>
31#include <plat/gpio.h>
32
33#include "wl1271.h"
34#include "wl12xx_80211.h"
35#include "wl1271_io.h"
36
37
38#define RX71_WL1271_IRQ_GPIO 42
39
40#ifndef SDIO_VENDOR_ID_TI
41#define SDIO_VENDOR_ID_TI 0x0097
42#endif
43
44#ifndef SDIO_DEVICE_ID_TI_WL1271
45#define SDIO_DEVICE_ID_TI_WL1271 0x4076
46#endif
47
48static const struct sdio_device_id wl1271_devices[] = {
49 { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
50 {}
51};
52MODULE_DEVICE_TABLE(sdio, wl1271_devices);
53
54static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
55{
56 return wl->if_priv;
57}
58
59static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
60{
61 return &(wl_to_func(wl)->dev);
62}
63
64static irqreturn_t wl1271_irq(int irq, void *cookie)
65{
66 struct wl1271 *wl = cookie;
67 unsigned long flags;
68
69 wl1271_debug(DEBUG_IRQ, "IRQ");
70
71 /* complete the ELP completion */
72 spin_lock_irqsave(&wl->wl_lock, flags);
73 if (wl->elp_compl) {
74 complete(wl->elp_compl);
75 wl->elp_compl = NULL;
76 }
77
78 if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
79 ieee80211_queue_work(wl->hw, &wl->irq_work);
80 set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
81 spin_unlock_irqrestore(&wl->wl_lock, flags);
82
83 return IRQ_HANDLED;
84}
85
86static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
87{
88 disable_irq(wl->irq);
89}
90
91static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
92{
93 enable_irq(wl->irq);
94}
95
96static void wl1271_sdio_reset(struct wl1271 *wl)
97{
98}
99
100static void wl1271_sdio_init(struct wl1271 *wl)
101{
102}
103
104static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
105 size_t len, bool fixed)
106{
107 int ret;
108 struct sdio_func *func = wl_to_func(wl);
109
110 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
111 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
112 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
113 addr, ((u8 *)buf)[0]);
114 } else {
115 if (fixed)
116 ret = sdio_readsb(func, buf, addr, len);
117 else
118 ret = sdio_memcpy_fromio(func, buf, addr, len);
119
120 wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
121 addr, len);
122 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
123 }
124
125 if (ret)
126 wl1271_error("sdio read failed (%d)", ret);
127
128}
129
130static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
131 size_t len, bool fixed)
132{
133 int ret;
134 struct sdio_func *func = wl_to_func(wl);
135
136 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
137 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
138 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
139 addr, ((u8 *)buf)[0]);
140 } else {
141 wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
142 addr, len);
143 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
144
145 if (fixed)
146 ret = sdio_writesb(func, addr, buf, len);
147 else
148 ret = sdio_memcpy_toio(func, addr, buf, len);
149 }
150 if (ret)
151 wl1271_error("sdio write failed (%d)", ret);
152
153}
154
155static void wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
156{
157 struct sdio_func *func = wl_to_func(wl);
158
159 /* Let the SDIO stack handle wlan_enable control, so we
160 * keep host claimed while wlan is in use to keep wl1271
161 * alive.
162 */
163 if (enable) {
164 sdio_claim_host(func);
165 sdio_enable_func(func);
166 } else {
167 sdio_disable_func(func);
168 sdio_release_host(func);
169 }
170}
171
172static struct wl1271_if_operations sdio_ops = {
173 .read = wl1271_sdio_raw_read,
174 .write = wl1271_sdio_raw_write,
175 .reset = wl1271_sdio_reset,
176 .init = wl1271_sdio_init,
177 .power = wl1271_sdio_set_power,
178 .dev = wl1271_sdio_wl_to_dev,
179 .enable_irq = wl1271_sdio_enable_interrupts,
180 .disable_irq = wl1271_sdio_disable_interrupts
181};
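
With this ops table, the bus-agnostic core can call into the SDIO backend
without knowing about it, roughly like the following sketch (the real
dispatch wrappers live in wl1271_io.h):

	/* sketch: core-side wrapper around the if_ops read hook */
	static inline void wl1271_raw_read(struct wl1271 *wl, int addr,
					   void *buf, size_t len, bool fixed)
	{
		wl->if_ops->read(wl, addr, buf, len, fixed);
	}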
182
183static int __devinit wl1271_probe(struct sdio_func *func,
184 const struct sdio_device_id *id)
185{
186 struct ieee80211_hw *hw;
187 struct wl1271 *wl;
188 int ret;
189
190 /* We are only able to handle the wlan function */
191 if (func->num != 0x02)
192 return -ENODEV;
193
194 hw = wl1271_alloc_hw();
195 if (IS_ERR(hw))
196 return PTR_ERR(hw);
197
198 wl = hw->priv;
199
200 wl->if_priv = func;
201 wl->if_ops = &sdio_ops;
202
203 /* Grab access to FN0 for ELP reg. */
204 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
205
206 wl->irq = gpio_to_irq(RX71_WL1271_IRQ_GPIO);
207 if (wl->irq < 0) {
208 ret = wl->irq;
209 wl1271_error("could not get irq!");
210 goto out_free;
211 }
212
213 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
214 if (ret < 0) {
215 wl1271_error("request_irq() failed: %d", ret);
216 goto out_free;
217 }
218
219 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
220
221 disable_irq(wl->irq);
222
223 ret = wl1271_init_ieee80211(wl);
224 if (ret)
225 goto out_irq;
226
227 ret = wl1271_register_hw(wl);
228 if (ret)
229 goto out_irq;
230
231 sdio_set_drvdata(func, wl);
232
233 wl1271_notice("initialized");
234
235 return 0;
236
237 out_irq:
238 free_irq(wl->irq, wl);
239
240
241 out_free:
242 wl1271_free_hw(wl);
243
244 return ret;
245}
246
247static void __devexit wl1271_remove(struct sdio_func *func)
248{
249 struct wl1271 *wl = sdio_get_drvdata(func);
250
251 free_irq(wl->irq, wl);
252
253 wl1271_unregister_hw(wl);
254 wl1271_free_hw(wl);
255}
256
257static struct sdio_driver wl1271_sdio_driver = {
258 .name = "wl1271_sdio",
259 .id_table = wl1271_devices,
260 .probe = wl1271_probe,
261 .remove = __devexit_p(wl1271_remove),
262};
263
264static int __init wl1271_init(void)
265{
266 int ret;
267
268 ret = sdio_register_driver(&wl1271_sdio_driver);
269 if (ret < 0) {
270 wl1271_error("failed to register sdio driver: %d", ret);
271 goto out;
272 }
273
274out:
275 return ret;
276}
277
278static void __exit wl1271_exit(void)
279{
280 sdio_unregister_driver(&wl1271_sdio_driver);
281
282 wl1271_notice("unloaded");
283}
284
285module_init(wl1271_init);
286module_exit(wl1271_exit);
287
288MODULE_LICENSE("GPL");
289MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
290MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
291MODULE_FIRMWARE(WL1271_FW_NAME);
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 053c84aceb49..5189b812f939 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -21,18 +21,69 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/irq.h>
24#include <linux/module.h> 25#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/crc7.h> 26#include <linux/crc7.h>
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28#include <linux/spi/wl12xx.h>
28#include <linux/slab.h> 29#include <linux/slab.h>
29 30
30#include "wl1271.h" 31#include "wl1271.h"
31#include "wl12xx_80211.h" 32#include "wl12xx_80211.h"
32#include "wl1271_spi.h" 33#include "wl1271_io.h"
34
35#include "wl1271_reg.h"
36
37#define WSPI_CMD_READ 0x40000000
38#define WSPI_CMD_WRITE 0x00000000
39#define WSPI_CMD_FIXED 0x20000000
40#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
41#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
42#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
43
44#define WSPI_INIT_CMD_CRC_LEN 5
45
46#define WSPI_INIT_CMD_START 0x00
47#define WSPI_INIT_CMD_TX 0x40
48/* the extra bypass bit is sampled by the TNET as '1' */
49#define WSPI_INIT_CMD_BYPASS_BIT 0x80
50#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
51#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
52#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
53#define WSPI_INIT_CMD_IOD 0x40
54#define WSPI_INIT_CMD_IP 0x20
55#define WSPI_INIT_CMD_CS 0x10
56#define WSPI_INIT_CMD_WS 0x08
57#define WSPI_INIT_CMD_WSPI 0x01
58#define WSPI_INIT_CMD_END 0x01
59
60#define WSPI_INIT_CMD_LEN 8
61
62#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
63 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
64#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
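
For orientation, these fields compose the 32-bit WSPI command word; a sketch
of a fixed-address read command (mirroring how wl1271_spi_raw_read() below
assembles it, with len and addr as the transfer parameters):

	u32 cmd = WSPI_CMD_READ
		| WSPI_CMD_FIXED
		| ((len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH)
		| (addr & WSPI_CMD_BYTE_ADDR);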
65
66static inline struct spi_device *wl_to_spi(struct wl1271 *wl)
67{
68 return wl->if_priv;
69}
33 70
71static struct device *wl1271_spi_wl_to_dev(struct wl1271 *wl)
72{
73 return &(wl_to_spi(wl)->dev);
74}
34 75
35void wl1271_spi_reset(struct wl1271 *wl) 76static void wl1271_spi_disable_interrupts(struct wl1271 *wl)
77{
78 disable_irq(wl->irq);
79}
80
81static void wl1271_spi_enable_interrupts(struct wl1271 *wl)
82{
83 enable_irq(wl->irq);
84}
85
86static void wl1271_spi_reset(struct wl1271 *wl)
36{ 87{
37 u8 *cmd; 88 u8 *cmd;
38 struct spi_transfer t; 89 struct spi_transfer t;
@@ -53,12 +104,13 @@ void wl1271_spi_reset(struct wl1271 *wl)
53 t.len = WSPI_INIT_CMD_LEN; 104 t.len = WSPI_INIT_CMD_LEN;
54 spi_message_add_tail(&t, &m); 105 spi_message_add_tail(&t, &m);
55 106
56 spi_sync(wl->spi, &m); 107 spi_sync(wl_to_spi(wl), &m);
57 108
58 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN); 109 wl1271_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
110 kfree(cmd);
59} 111}
60 112
61void wl1271_spi_init(struct wl1271 *wl) 113static void wl1271_spi_init(struct wl1271 *wl)
62{ 114{
63 u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd; 115 u8 crc[WSPI_INIT_CMD_CRC_LEN], *cmd;
64 struct spi_transfer t; 116 struct spi_transfer t;
@@ -107,48 +159,25 @@ void wl1271_spi_init(struct wl1271 *wl)
107 t.len = WSPI_INIT_CMD_LEN; 159 t.len = WSPI_INIT_CMD_LEN;
108 spi_message_add_tail(&t, &m); 160 spi_message_add_tail(&t, &m);
109 161
110 spi_sync(wl->spi, &m); 162 spi_sync(wl_to_spi(wl), &m);
111 163
112 wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN); 164 wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
165 kfree(cmd);
113} 166}
114 167
115#define WL1271_BUSY_WORD_TIMEOUT 1000 168#define WL1271_BUSY_WORD_TIMEOUT 1000
116 169
117/* FIXME: Check busy words, removed due to SPI bug */ 170static int wl1271_spi_read_busy(struct wl1271 *wl)
118#if 0
119static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
120{ 171{
121 struct spi_transfer t[1]; 172 struct spi_transfer t[1];
122 struct spi_message m; 173 struct spi_message m;
123 u32 *busy_buf; 174 u32 *busy_buf;
124 int num_busy_bytes = 0; 175 int num_busy_bytes = 0;
125 176
126 wl1271_info("spi read BUSY!");
127
128 /*
129 * Look for the non-busy word in the read buffer, and if found,
130 * read in the remaining data into the buffer.
131 */
132 busy_buf = (u32 *)buf;
133 for (; (u32)busy_buf < (u32)buf + len; busy_buf++) {
134 num_busy_bytes += sizeof(u32);
135 if (*busy_buf & 0x1) {
136 spi_message_init(&m);
137 memset(t, 0, sizeof(t));
138 memmove(buf, busy_buf, len - num_busy_bytes);
139 t[0].rx_buf = buf + (len - num_busy_bytes);
140 t[0].len = num_busy_bytes;
141 spi_message_add_tail(&t[0], &m);
142 spi_sync(wl->spi, &m);
143 return;
144 }
145 }
146
147 /* 177 /*
148 * Read further busy words from SPI until a non-busy word is 178 * Read further busy words from SPI until a non-busy word is
149 * encountered, then read the data itself into the buffer. 179 * encountered, then read the data itself into the buffer.
150 */ 180 */
151 wl1271_info("spi read BUSY-polling needed!");
152 181
153 num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT; 182 num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
154 busy_buf = wl->buffer_busyword; 183 busy_buf = wl->buffer_busyword;
@@ -158,28 +187,21 @@ static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
158 memset(t, 0, sizeof(t)); 187 memset(t, 0, sizeof(t));
159 t[0].rx_buf = busy_buf; 188 t[0].rx_buf = busy_buf;
160 t[0].len = sizeof(u32); 189 t[0].len = sizeof(u32);
190 t[0].cs_change = true;
161 spi_message_add_tail(&t[0], &m); 191 spi_message_add_tail(&t[0], &m);
162 spi_sync(wl->spi, &m); 192 spi_sync(wl_to_spi(wl), &m);
163 193
164 if (*busy_buf & 0x1) { 194 if (*busy_buf & 0x1)
165 spi_message_init(&m); 195 return 0;
166 memset(t, 0, sizeof(t));
167 t[0].rx_buf = buf;
168 t[0].len = len;
169 spi_message_add_tail(&t[0], &m);
170 spi_sync(wl->spi, &m);
171 return;
172 }
173 } 196 }
174 197
175 /* The SPI bus is unresponsive, the read failed. */ 198 /* The SPI bus is unresponsive, the read failed. */
176 memset(buf, 0, len);
177 wl1271_error("SPI read busy-word timeout!\n"); 199 wl1271_error("SPI read busy-word timeout!\n");
200 return -ETIMEDOUT;
178} 201}
179#endif
180 202
181void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf, 203static void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
182 size_t len, bool fixed) 204 size_t len, bool fixed)
183{ 205{
184 struct spi_transfer t[3]; 206 struct spi_transfer t[3];
185 struct spi_message m; 207 struct spi_message m;
@@ -202,28 +224,38 @@ void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
202 224
203 t[0].tx_buf = cmd; 225 t[0].tx_buf = cmd;
204 t[0].len = 4; 226 t[0].len = 4;
227 t[0].cs_change = true;
205 spi_message_add_tail(&t[0], &m); 228 spi_message_add_tail(&t[0], &m);
206 229
207 /* Busy and non busy words read */ 230 /* Busy and non busy words read */
208 t[1].rx_buf = busy_buf; 231 t[1].rx_buf = busy_buf;
209 t[1].len = WL1271_BUSY_WORD_LEN; 232 t[1].len = WL1271_BUSY_WORD_LEN;
233 t[1].cs_change = true;
210 spi_message_add_tail(&t[1], &m); 234 spi_message_add_tail(&t[1], &m);
211 235
212 t[2].rx_buf = buf; 236 spi_sync(wl_to_spi(wl), &m);
213 t[2].len = len;
214 spi_message_add_tail(&t[2], &m);
215 237
216 spi_sync(wl->spi, &m); 238 if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
239 wl1271_spi_read_busy(wl)) {
240 memset(buf, 0, len);
241 return;
242 }
217 243
218 /* FIXME: Check busy words, removed due to SPI bug */ 244 spi_message_init(&m);
219 /* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1)) 245 memset(t, 0, sizeof(t));
220 wl1271_spi_read_busy(wl, buf, len); */ 246
247 t[0].rx_buf = buf;
248 t[0].len = len;
249 t[0].cs_change = true;
250 spi_message_add_tail(&t[0], &m);
251
252 spi_sync(wl_to_spi(wl), &m);
221 253
222 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); 254 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
223 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); 255 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
224} 256}
225 257
226void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf, 258static void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
227 size_t len, bool fixed) 259 size_t len, bool fixed)
228{ 260{
229 struct spi_transfer t[2]; 261 struct spi_transfer t[2];
@@ -251,8 +283,181 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
251 t[1].len = len; 283 t[1].len = len;
252 spi_message_add_tail(&t[1], &m); 284 spi_message_add_tail(&t[1], &m);
253 285
254 spi_sync(wl->spi, &m); 286 spi_sync(wl_to_spi(wl), &m);
255 287
256 wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd)); 288 wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
257 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); 289 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
258} 290}
291
292static irqreturn_t wl1271_irq(int irq, void *cookie)
293{
294 struct wl1271 *wl;
295 unsigned long flags;
296
297 wl1271_debug(DEBUG_IRQ, "IRQ");
298
299 wl = cookie;
300
301 /* complete the ELP completion */
302 spin_lock_irqsave(&wl->wl_lock, flags);
303 if (wl->elp_compl) {
304 complete(wl->elp_compl);
305 wl->elp_compl = NULL;
306 }
307
308 if (!test_and_set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags))
309 ieee80211_queue_work(wl->hw, &wl->irq_work);
310 set_bit(WL1271_FLAG_IRQ_PENDING, &wl->flags);
311 spin_unlock_irqrestore(&wl->wl_lock, flags);
312
313 return IRQ_HANDLED;
314}
315
316static void wl1271_spi_set_power(struct wl1271 *wl, bool enable)
317{
318 if (wl->set_power)
319 wl->set_power(enable);
320}
321
322static struct wl1271_if_operations spi_ops = {
323 .read = wl1271_spi_raw_read,
324 .write = wl1271_spi_raw_write,
325 .reset = wl1271_spi_reset,
326 .init = wl1271_spi_init,
327 .power = wl1271_spi_set_power,
328 .dev = wl1271_spi_wl_to_dev,
329 .enable_irq = wl1271_spi_enable_interrupts,
330 .disable_irq = wl1271_spi_disable_interrupts
331};
332
333static int __devinit wl1271_probe(struct spi_device *spi)
334{
335 struct wl12xx_platform_data *pdata;
336 struct ieee80211_hw *hw;
337 struct wl1271 *wl;
338 int ret;
339
340 pdata = spi->dev.platform_data;
341 if (!pdata) {
342 wl1271_error("no platform data");
343 return -ENODEV;
344 }
345
346 hw = wl1271_alloc_hw();
347 if (IS_ERR(hw))
348 return PTR_ERR(hw);
349
350 wl = hw->priv;
351
352 dev_set_drvdata(&spi->dev, wl);
353 wl->if_priv = spi;
354
355 wl->if_ops = &spi_ops;
356
 357 /* This is the only SPI value that we need to set here; the rest
358 * comes from the board-peripherals file */
359 spi->bits_per_word = 32;
360
361 ret = spi_setup(spi);
362 if (ret < 0) {
363 wl1271_error("spi_setup failed");
364 goto out_free;
365 }
366
367 wl->set_power = pdata->set_power;
368 if (!wl->set_power) {
369 wl1271_error("set power function missing in platform data");
370 ret = -ENODEV;
371 goto out_free;
372 }
373
374 wl->irq = spi->irq;
375 if (wl->irq < 0) {
376 wl1271_error("irq missing in platform data");
377 ret = -ENODEV;
378 goto out_free;
379 }
380
381 ret = request_irq(wl->irq, wl1271_irq, 0, DRIVER_NAME, wl);
382 if (ret < 0) {
383 wl1271_error("request_irq() failed: %d", ret);
384 goto out_free;
385 }
386
387 set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
388
389 disable_irq(wl->irq);
390
391 ret = wl1271_init_ieee80211(wl);
392 if (ret)
393 goto out_irq;
394
395 ret = wl1271_register_hw(wl);
396 if (ret)
397 goto out_irq;
398
399 wl1271_notice("initialized");
400
401 return 0;
402
403 out_irq:
404 free_irq(wl->irq, wl);
405
406 out_free:
407 wl1271_free_hw(wl);
408
409 return ret;
410}
411
412static int __devexit wl1271_remove(struct spi_device *spi)
413{
414 struct wl1271 *wl = dev_get_drvdata(&spi->dev);
415
416 free_irq(wl->irq, wl);
417
418 wl1271_unregister_hw(wl);
419 wl1271_free_hw(wl);
420
421 return 0;
422}
423
424
425static struct spi_driver wl1271_spi_driver = {
426 .driver = {
427 .name = "wl1271_spi",
428 .bus = &spi_bus_type,
429 .owner = THIS_MODULE,
430 },
431
432 .probe = wl1271_probe,
433 .remove = __devexit_p(wl1271_remove),
434};
435
436static int __init wl1271_init(void)
437{
438 int ret;
439
440 ret = spi_register_driver(&wl1271_spi_driver);
441 if (ret < 0) {
442 wl1271_error("failed to register spi driver: %d", ret);
443 goto out;
444 }
445
446out:
447 return ret;
448}
449
450static void __exit wl1271_exit(void)
451{
452 spi_unregister_driver(&wl1271_spi_driver);
453
454 wl1271_notice("unloaded");
455}
456
457module_init(wl1271_init);
458module_exit(wl1271_exit);
459
460MODULE_LICENSE("GPL");
461MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
462MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
463MODULE_FIRMWARE(WL1271_FW_NAME);
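
The rewritten read path above resurrects the busy-word handshake that had been stubbed out behind #if 0 because of an SPI bug. The enabler is cs_change on each transfer, which keeps the chip selected across the command, the fixed busy words, and the payload. A minimal, self-contained sketch of the new polling step, assuming the WL1271_BUSY_WORD_TIMEOUT value of 1000 from the file above (buffer handling simplified; the real driver polls into the DMA-safe wl->buffer_busyword):

#include <linux/spi/spi.h>
#include <linux/errno.h>

static int wspi_wait_not_busy(struct spi_device *spi, u32 *busy_buf)
{
        int tries = 1000;                       /* WL1271_BUSY_WORD_TIMEOUT */

        while (tries--) {
                struct spi_transfer t = {
                        .rx_buf = busy_buf,
                        .len = sizeof(u32),
                        .cs_change = true,      /* keep the chip selected */
                };
                struct spi_message m;

                spi_message_init(&m);
                spi_message_add_tail(&t, &m);
                spi_sync(spi, &m);

                if (*busy_buf & 0x1)            /* LSB set: no longer busy */
                        return 0;
        }

        return -ETIMEDOUT;      /* bus unresponsive, the read failed */
}

On timeout, wl1271_spi_raw_read() zeroes the destination buffer and bails out rather than handing back stale data.
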
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
deleted file mode 100644
index a803596dad4a..000000000000
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
5 * Copyright (C) 2008-2009 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25#ifndef __WL1271_SPI_H__
26#define __WL1271_SPI_H__
27
28#include "wl1271_reg.h"
29
30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
31
32#define HW_PARTITION_REGISTERS_ADDR 0x1ffc0
33#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
34#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
35#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
40
41#define HW_ACCESS_REGISTER_SIZE 4
42
43#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
44
45#define WSPI_CMD_READ 0x40000000
46#define WSPI_CMD_WRITE 0x00000000
47#define WSPI_CMD_FIXED 0x20000000
48#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
49#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
50#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
51
52#define WSPI_INIT_CMD_CRC_LEN 5
53
54#define WSPI_INIT_CMD_START 0x00
55#define WSPI_INIT_CMD_TX 0x40
56/* the extra bypass bit is sampled by the TNET as '1' */
57#define WSPI_INIT_CMD_BYPASS_BIT 0x80
58#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
59#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
60#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
61#define WSPI_INIT_CMD_IOD 0x40
62#define WSPI_INIT_CMD_IP 0x20
63#define WSPI_INIT_CMD_CS 0x10
64#define WSPI_INIT_CMD_WS 0x08
65#define WSPI_INIT_CMD_WSPI 0x01
66#define WSPI_INIT_CMD_END 0x01
67
68#define WSPI_INIT_CMD_LEN 8
69
70#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
71 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
72#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
73
74#define OCP_CMD_LOOP 32
75
76#define OCP_CMD_WRITE 0x1
77#define OCP_CMD_READ 0x2
78
79#define OCP_READY_MASK BIT(18)
80#define OCP_STATUS_MASK (BIT(16) | BIT(17))
81
82#define OCP_STATUS_NO_RESP 0x00000
83#define OCP_STATUS_OK 0x10000
84#define OCP_STATUS_REQ_FAILED 0x20000
85#define OCP_STATUS_RESP_ERROR 0x30000
86
87/* Raw target IO, address is not translated */
88void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
89 size_t len, bool fixed);
90void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
91 size_t len, bool fixed);
92
93/* INIT and RESET words */
94void wl1271_spi_reset(struct wl1271 *wl);
95void wl1271_spi_init(struct wl1271 *wl);
96#endif /* __WL1271_SPI_H__ */
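
For reference, the WSPI command word that the raw read/write paths transmit is composed from the masks defined in this (now deleted) header. An illustrative helper, equivalent to what the driver builds inline:

/* illustrative only; mask values taken from the header above */
static u32 wspi_read_cmd(u32 addr, size_t len, bool fixed)
{
        u32 cmd = WSPI_CMD_READ;

        /* 12-bit byte count, bits 17..28 */
        cmd |= ((u32)len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
        /* 17-bit byte address, bits 0..16 */
        cmd |= addr & WSPI_CMD_BYTE_ADDR;
        if (fixed)
                cmd |= WSPI_CMD_FIXED;  /* fixed (non-incrementing) address */

        return cmd;
}
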
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
index 5c1c4f565fd8..554deb4d024e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -26,7 +26,6 @@
26#include <net/genetlink.h> 26#include <net/genetlink.h>
27 27
28#include "wl1271.h" 28#include "wl1271.h"
29#include "wl1271_spi.h"
30#include "wl1271_acx.h" 29#include "wl1271_acx.h"
31 30
32#define WL1271_TM_MAX_DATA_LENGTH 1024 31#define WL1271_TM_MAX_DATA_LENGTH 1024
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 811e739d05bf..62db79508ddf 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -25,7 +25,6 @@
25#include <linux/module.h> 25#include <linux/module.h>
26 26
27#include "wl1271.h" 27#include "wl1271.h"
28#include "wl1271_spi.h"
29#include "wl1271_io.h" 28#include "wl1271_io.h"
30#include "wl1271_reg.h" 29#include "wl1271_reg.h"
31#include "wl1271_ps.h" 30#include "wl1271_ps.h"
@@ -47,7 +46,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
47{ 46{
48 struct wl1271_tx_hw_descr *desc; 47 struct wl1271_tx_hw_descr *desc;
49 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra; 48 u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
50 u32 total_blocks, excluded; 49 u32 total_blocks;
51 int id, ret = -EBUSY; 50 int id, ret = -EBUSY;
52 51
53 /* allocate free identifier for the packet */ 52 /* allocate free identifier for the packet */
@@ -57,12 +56,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
57 56
58 /* approximate the number of blocks required for this packet 57 /* approximate the number of blocks required for this packet
59 in the firmware */ 58 in the firmware */
60 /* FIXME: try to figure out what is done here and make it cleaner */ 59 total_blocks = total_len + TX_HW_BLOCK_SIZE - 1;
61 total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV; 60 total_blocks = total_blocks / TX_HW_BLOCK_SIZE + TX_HW_BLOCK_SPARE;
62 excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34;
63 total_blocks += (excluded > 252) ? 2 : 1;
64 total_blocks += TX_HW_BLOCK_SPARE;
65
66 if (total_blocks <= wl->tx_blocks_available) { 61 if (total_blocks <= wl->tx_blocks_available) {
67 desc = (struct wl1271_tx_hw_descr *)skb_push( 62 desc = (struct wl1271_tx_hw_descr *)skb_push(
68 skb, total_len - skb->len); 63 skb, total_len - skb->len);
@@ -87,8 +82,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
87static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb, 82static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
88 u32 extra, struct ieee80211_tx_info *control) 83 u32 extra, struct ieee80211_tx_info *control)
89{ 84{
85 struct timespec ts;
90 struct wl1271_tx_hw_descr *desc; 86 struct wl1271_tx_hw_descr *desc;
91 int pad, ac; 87 int pad, ac;
88 s64 hosttime;
92 u16 tx_attr; 89 u16 tx_attr;
93 90
94 desc = (struct wl1271_tx_hw_descr *) skb->data; 91 desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -102,8 +99,9 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
102 } 99 }
103 100
104 /* configure packet life time */ 101 /* configure packet life time */
105 desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) - 102 getnstimeofday(&ts);
106 wl->time_offset); 103 hosttime = (timespec_to_ns(&ts) >> 10);
104 desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
107 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU); 105 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
108 106
109 /* configure the tx attributes */ 107 /* configure the tx attributes */
@@ -170,7 +168,6 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
170 168
171 /* write packet new counter into the write access register */ 169 /* write packet new counter into the write access register */
172 wl->tx_packets_count++; 170 wl->tx_packets_count++;
173 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
174 171
175 desc = (struct wl1271_tx_hw_descr *) skb->data; 172 desc = (struct wl1271_tx_hw_descr *) skb->data;
176 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)", 173 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -223,7 +220,7 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
223 return ret; 220 return ret;
224} 221}
225 222
226static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set) 223u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
227{ 224{
228 struct ieee80211_supported_band *band; 225 struct ieee80211_supported_band *band;
229 u32 enabled_rates = 0; 226 u32 enabled_rates = 0;
@@ -245,6 +242,7 @@ void wl1271_tx_work(struct work_struct *work)
245 struct sk_buff *skb; 242 struct sk_buff *skb;
246 bool woken_up = false; 243 bool woken_up = false;
247 u32 sta_rates = 0; 244 u32 sta_rates = 0;
245 u32 prev_tx_packets_count;
248 int ret; 246 int ret;
249 247
250 /* check if the rates supported by the AP have changed */ 248 /* check if the rates supported by the AP have changed */
@@ -261,6 +259,8 @@ void wl1271_tx_work(struct work_struct *work)
261 if (unlikely(wl->state == WL1271_STATE_OFF)) 259 if (unlikely(wl->state == WL1271_STATE_OFF))
262 goto out; 260 goto out;
263 261
262 prev_tx_packets_count = wl->tx_packets_count;
263
264 /* if rates have changed, re-configure the rate policy */ 264 /* if rates have changed, re-configure the rate policy */
265 if (unlikely(sta_rates)) { 265 if (unlikely(sta_rates)) {
266 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates); 266 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
@@ -271,31 +271,26 @@ void wl1271_tx_work(struct work_struct *work)
271 if (!woken_up) { 271 if (!woken_up) {
272 ret = wl1271_ps_elp_wakeup(wl, false); 272 ret = wl1271_ps_elp_wakeup(wl, false);
273 if (ret < 0) 273 if (ret < 0)
274 goto out; 274 goto out_ack;
275 woken_up = true; 275 woken_up = true;
276 } 276 }
277 277
278 ret = wl1271_tx_frame(wl, skb); 278 ret = wl1271_tx_frame(wl, skb);
279 if (ret == -EBUSY) { 279 if (ret == -EBUSY) {
 280 /* firmware buffer is full, stop queues */ 280 /* firmware buffer is full, let's stop transmitting. */
281 wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
282 "stop queues");
283 ieee80211_stop_queues(wl->hw);
284 set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
285 skb_queue_head(&wl->tx_queue, skb); 281 skb_queue_head(&wl->tx_queue, skb);
286 goto out; 282 goto out_ack;
287 } else if (ret < 0) { 283 } else if (ret < 0) {
288 dev_kfree_skb(skb); 284 dev_kfree_skb(skb);
289 goto out; 285 goto out_ack;
290 } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
291 &wl->flags)) {
292 /* firmware buffer has space, restart queues */
293 wl1271_debug(DEBUG_TX,
294 "complete_packet: waking queues");
295 ieee80211_wake_queues(wl->hw);
296 } 286 }
297 } 287 }
298 288
289out_ack:
290 /* interrupt the firmware with the new packets */
291 if (prev_tx_packets_count != wl->tx_packets_count)
292 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
293
299out: 294out:
300 if (woken_up) 295 if (woken_up)
301 wl1271_ps_elp_sleep(wl); 296 wl1271_ps_elp_sleep(wl);
@@ -308,11 +303,12 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
308{ 303{
309 struct ieee80211_tx_info *info; 304 struct ieee80211_tx_info *info;
310 struct sk_buff *skb; 305 struct sk_buff *skb;
311 u16 seq;
312 int id = result->id; 306 int id = result->id;
307 int rate = -1;
308 u8 retries = 0;
313 309
314 /* check for id legality */ 310 /* check for id legality */
315 if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) { 311 if (unlikely(id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL)) {
316 wl1271_warning("TX result illegal id: %d", id); 312 wl1271_warning("TX result illegal id: %d", id);
317 return; 313 return;
318 } 314 }
@@ -320,31 +316,29 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
320 skb = wl->tx_frames[id]; 316 skb = wl->tx_frames[id];
321 info = IEEE80211_SKB_CB(skb); 317 info = IEEE80211_SKB_CB(skb);
322 318
323 /* update packet status */ 319 /* update the TX status info */
324 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 320 if (result->status == TX_SUCCESS) {
325 if (result->status == TX_SUCCESS) 321 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
326 info->flags |= IEEE80211_TX_STAT_ACK; 322 info->flags |= IEEE80211_TX_STAT_ACK;
327 if (result->status & TX_RETRY_EXCEEDED) { 323 rate = wl1271_rate_to_idx(wl, result->rate_class_index);
328 /* FIXME */ 324 retries = result->ack_failures;
329 /* info->status.excessive_retries = 1; */ 325 } else if (result->status == TX_RETRY_EXCEEDED) {
330 wl->stats.excessive_retries++; 326 wl->stats.excessive_retries++;
331 } 327 retries = result->ack_failures;
332 } 328 }
333 329
334 /* FIXME */ 330 info->status.rates[0].idx = rate;
335 /* info->status.retry_count = result->ack_failures; */ 331 info->status.rates[0].count = retries;
332 info->status.rates[0].flags = 0;
333 info->status.ack_signal = -1;
334
336 wl->stats.retry_count += result->ack_failures; 335 wl->stats.retry_count += result->ack_failures;
337 336
338 /* update security sequence number */ 337 /* update security sequence number */
339 seq = wl->tx_security_seq_16 + 338 wl->tx_security_seq += (result->lsb_security_sequence_number -
340 (result->lsb_security_sequence_number - 339 wl->tx_security_last_seq);
341 wl->tx_security_last_seq);
342 wl->tx_security_last_seq = result->lsb_security_sequence_number; 340 wl->tx_security_last_seq = result->lsb_security_sequence_number;
343 341
344 if (seq < wl->tx_security_seq_16)
345 wl->tx_security_seq_32++;
346 wl->tx_security_seq_16 = seq;
347
348 /* remove private header from packet */ 342 /* remove private header from packet */
349 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr)); 343 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
350 344
@@ -367,23 +361,29 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
367} 361}
368 362
369/* Called upon reception of a TX complete interrupt */ 363/* Called upon reception of a TX complete interrupt */
370void wl1271_tx_complete(struct wl1271 *wl, u32 count) 364void wl1271_tx_complete(struct wl1271 *wl)
371{ 365{
372 struct wl1271_acx_mem_map *memmap = 366 struct wl1271_acx_mem_map *memmap =
373 (struct wl1271_acx_mem_map *)wl->target_mem_map; 367 (struct wl1271_acx_mem_map *)wl->target_mem_map;
368 u32 count, fw_counter;
374 u32 i; 369 u32 i;
375 370
376 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
377
378 /* read the tx results from the chipset */ 371 /* read the tx results from the chipset */
379 wl1271_read(wl, le32_to_cpu(memmap->tx_result), 372 wl1271_read(wl, le32_to_cpu(memmap->tx_result),
380 wl->tx_res_if, sizeof(*wl->tx_res_if), false); 373 wl->tx_res_if, sizeof(*wl->tx_res_if), false);
374 fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
375
376 /* write host counter to chipset (to ack) */
377 wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
378 offsetof(struct wl1271_tx_hw_res_if,
379 tx_result_host_counter), fw_counter);
380
381 count = fw_counter - wl->tx_results_count;
382 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
381 383
382 /* verify that the result buffer is not getting overrun */ 384 /* verify that the result buffer is not getting overrun */
383 if (count > TX_HW_RESULT_QUEUE_LEN) { 385 if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
384 wl1271_warning("TX result overflow from chipset: %d", count); 386 wl1271_warning("TX result overflow from chipset: %d", count);
385 count = TX_HW_RESULT_QUEUE_LEN;
386 }
387 387
388 /* process the results */ 388 /* process the results */
389 for (i = 0; i < count; i++) { 389 for (i = 0; i < count; i++) {
@@ -397,11 +397,18 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
397 wl->tx_results_count++; 397 wl->tx_results_count++;
398 } 398 }
399 399
400 /* write host counter to chipset (to ack) */ 400 if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
401 wl1271_write32(wl, le32_to_cpu(memmap->tx_result) + 401 skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
402 offsetof(struct wl1271_tx_hw_res_if, 402 unsigned long flags;
403 tx_result_host_counter), 403
404 le32_to_cpu(wl->tx_res_if->tx_result_fw_counter)); 404 /* firmware buffer has space, restart queues */
405 wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
406 spin_lock_irqsave(&wl->wl_lock, flags);
407 ieee80211_wake_queues(wl->hw);
408 clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
409 spin_unlock_irqrestore(&wl->wl_lock, flags);
410 ieee80211_queue_work(wl->hw, &wl->tx_work);
411 }
405} 412}
406 413
407/* caller must hold wl->mutex */ 414/* caller must hold wl->mutex */
@@ -409,31 +416,19 @@ void wl1271_tx_flush(struct wl1271 *wl)
409{ 416{
410 int i; 417 int i;
411 struct sk_buff *skb; 418 struct sk_buff *skb;
412 struct ieee80211_tx_info *info;
413 419
414 /* TX failure */ 420 /* TX failure */
415/* control->flags = 0; FIXME */ 421/* control->flags = 0; FIXME */
416 422
417 while ((skb = skb_dequeue(&wl->tx_queue))) { 423 while ((skb = skb_dequeue(&wl->tx_queue))) {
418 info = IEEE80211_SKB_CB(skb);
419
420 wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb); 424 wl1271_debug(DEBUG_TX, "flushing skb 0x%p", skb);
421
422 if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
423 continue;
424
425 ieee80211_tx_status(wl->hw, skb); 425 ieee80211_tx_status(wl->hw, skb);
426 } 426 }
427 427
428 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 428 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
429 if (wl->tx_frames[i] != NULL) { 429 if (wl->tx_frames[i] != NULL) {
430 skb = wl->tx_frames[i]; 430 skb = wl->tx_frames[i];
431 info = IEEE80211_SKB_CB(skb);
432
433 if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
434 continue;
435
436 ieee80211_tx_status(wl->hw, skb);
437 wl->tx_frames[i] = NULL; 431 wl->tx_frames[i] = NULL;
432 ieee80211_tx_status(wl->hw, skb);
438 } 433 }
439} 434}
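
Two accounting changes above deserve a note. First, the firmware block estimate in wl1271_tx_allocate() loses its magic numbers and becomes a plain round-up plus a fixed spare, equivalent to the kernel's rounding helper (constants per the wl1271_tx.h hunk below):

#include <linux/kernel.h>       /* DIV_ROUND_UP() */

#define TX_HW_BLOCK_SIZE        252
#define TX_HW_BLOCK_SPARE       2

static u32 wl1271_tx_blocks(u32 total_len)
{
        /* same as (total_len + SIZE - 1) / SIZE + SPARE in the patch */
        return DIV_ROUND_UP(total_len, TX_HW_BLOCK_SIZE) + TX_HW_BLOCK_SPARE;
}

Second, wl1271_tx_complete() now derives the number of pending results itself (count = fw_counter - wl->tx_results_count) and acks the firmware counter up front, instead of trusting a count passed in by the caller. Along the same lines, the descriptor timestamp moves from jiffies to getnstimeofday(), with timespec_to_ns(&ts) >> 10 serving as a cheap, approximately-microsecond unit, and the split 16+32-bit security sequence counter with its hand-rolled carry collapses into a single wider counter that simply accumulates deltas of the firmware's LSB value.
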
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 17e405a09caa..3b8b7ac253fd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -26,7 +26,7 @@
26#define __WL1271_TX_H__ 26#define __WL1271_TX_H__
27 27
28#define TX_HW_BLOCK_SPARE 2 28#define TX_HW_BLOCK_SPARE 2
29#define TX_HW_BLOCK_SHIFT_DIV 8 29#define TX_HW_BLOCK_SIZE 252
30 30
31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000 31#define TX_HW_MGMT_PKT_LIFETIME_TU 2000
32/* The chipset reference driver states, that the "aid" value 1 32/* The chipset reference driver states, that the "aid" value 1
@@ -125,9 +125,6 @@ struct wl1271_tx_hw_res_if {
125 125
126static inline int wl1271_tx_get_queue(int queue) 126static inline int wl1271_tx_get_queue(int queue)
127{ 127{
128 /* FIXME: use best effort until WMM is enabled */
129 return CONF_TX_AC_BE;
130
131 switch (queue) { 128 switch (queue) {
132 case 0: 129 case 0:
133 return CONF_TX_AC_VO; 130 return CONF_TX_AC_VO;
@@ -160,7 +157,9 @@ static inline int wl1271_tx_ac_to_tid(int ac)
160} 157}
161 158
162void wl1271_tx_work(struct work_struct *work); 159void wl1271_tx_work(struct work_struct *work);
163void wl1271_tx_complete(struct wl1271 *wl, u32 count); 160void wl1271_tx_complete(struct wl1271 *wl);
164void wl1271_tx_flush(struct wl1271 *wl); 161void wl1271_tx_flush(struct wl1271 *wl);
162u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate);
163u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
165 164
166#endif 165#endif
diff --git a/drivers/net/wireless/wl3501_cs.c b/drivers/net/wireless/wl3501_cs.c
index 7b9621de239f..65dd502eab0d 100644
--- a/drivers/net/wireless/wl3501_cs.c
+++ b/drivers/net/wireless/wl3501_cs.c
@@ -1834,32 +1834,32 @@ out:
1834} 1834}
1835 1835
1836static const iw_handler wl3501_handler[] = { 1836static const iw_handler wl3501_handler[] = {
1837 [SIOCGIWNAME - SIOCIWFIRST] = wl3501_get_name, 1837 IW_HANDLER(SIOCGIWNAME, wl3501_get_name),
1838 [SIOCSIWFREQ - SIOCIWFIRST] = wl3501_set_freq, 1838 IW_HANDLER(SIOCSIWFREQ, wl3501_set_freq),
1839 [SIOCGIWFREQ - SIOCIWFIRST] = wl3501_get_freq, 1839 IW_HANDLER(SIOCGIWFREQ, wl3501_get_freq),
1840 [SIOCSIWMODE - SIOCIWFIRST] = wl3501_set_mode, 1840 IW_HANDLER(SIOCSIWMODE, wl3501_set_mode),
1841 [SIOCGIWMODE - SIOCIWFIRST] = wl3501_get_mode, 1841 IW_HANDLER(SIOCGIWMODE, wl3501_get_mode),
1842 [SIOCGIWSENS - SIOCIWFIRST] = wl3501_get_sens, 1842 IW_HANDLER(SIOCGIWSENS, wl3501_get_sens),
1843 [SIOCGIWRANGE - SIOCIWFIRST] = wl3501_get_range, 1843 IW_HANDLER(SIOCGIWRANGE, wl3501_get_range),
1844 [SIOCSIWSPY - SIOCIWFIRST] = iw_handler_set_spy, 1844 IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy),
1845 [SIOCGIWSPY - SIOCIWFIRST] = iw_handler_get_spy, 1845 IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy),
1846 [SIOCSIWTHRSPY - SIOCIWFIRST] = iw_handler_set_thrspy, 1846 IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy),
1847 [SIOCGIWTHRSPY - SIOCIWFIRST] = iw_handler_get_thrspy, 1847 IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy),
1848 [SIOCSIWAP - SIOCIWFIRST] = wl3501_set_wap, 1848 IW_HANDLER(SIOCSIWAP, wl3501_set_wap),
1849 [SIOCGIWAP - SIOCIWFIRST] = wl3501_get_wap, 1849 IW_HANDLER(SIOCGIWAP, wl3501_get_wap),
1850 [SIOCSIWSCAN - SIOCIWFIRST] = wl3501_set_scan, 1850 IW_HANDLER(SIOCSIWSCAN, wl3501_set_scan),
1851 [SIOCGIWSCAN - SIOCIWFIRST] = wl3501_get_scan, 1851 IW_HANDLER(SIOCGIWSCAN, wl3501_get_scan),
1852 [SIOCSIWESSID - SIOCIWFIRST] = wl3501_set_essid, 1852 IW_HANDLER(SIOCSIWESSID, wl3501_set_essid),
1853 [SIOCGIWESSID - SIOCIWFIRST] = wl3501_get_essid, 1853 IW_HANDLER(SIOCGIWESSID, wl3501_get_essid),
1854 [SIOCSIWNICKN - SIOCIWFIRST] = wl3501_set_nick, 1854 IW_HANDLER(SIOCSIWNICKN, wl3501_set_nick),
1855 [SIOCGIWNICKN - SIOCIWFIRST] = wl3501_get_nick, 1855 IW_HANDLER(SIOCGIWNICKN, wl3501_get_nick),
1856 [SIOCGIWRATE - SIOCIWFIRST] = wl3501_get_rate, 1856 IW_HANDLER(SIOCGIWRATE, wl3501_get_rate),
1857 [SIOCGIWRTS - SIOCIWFIRST] = wl3501_get_rts_threshold, 1857 IW_HANDLER(SIOCGIWRTS, wl3501_get_rts_threshold),
1858 [SIOCGIWFRAG - SIOCIWFIRST] = wl3501_get_frag_threshold, 1858 IW_HANDLER(SIOCGIWFRAG, wl3501_get_frag_threshold),
1859 [SIOCGIWTXPOW - SIOCIWFIRST] = wl3501_get_txpow, 1859 IW_HANDLER(SIOCGIWTXPOW, wl3501_get_txpow),
1860 [SIOCGIWRETRY - SIOCIWFIRST] = wl3501_get_retry, 1860 IW_HANDLER(SIOCGIWRETRY, wl3501_get_retry),
1861 [SIOCGIWENCODE - SIOCIWFIRST] = wl3501_get_encode, 1861 IW_HANDLER(SIOCGIWENCODE, wl3501_get_encode),
1862 [SIOCGIWPOWER - SIOCIWFIRST] = wl3501_get_power, 1862 IW_HANDLER(SIOCGIWPOWER, wl3501_get_power),
1863}; 1863};
1864 1864
1865static const struct iw_handler_def wl3501_handler_def = { 1865static const struct iw_handler_def wl3501_handler_def = {
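
The handler-table conversion above is purely mechanical: IW_HANDLER() hides the SIOCIWFIRST offset arithmetic that every entry used to open-code. Its effective shape (a sketch; see include/net/iw_handler.h for the exact definition) is:

#define IW_HANDLER(id, func) \
        [(id) - SIOCIWFIRST] = (func)
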
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 9d1277874645..1e2b684f8ed4 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -876,7 +876,7 @@ static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
876static void zd1201_set_multicast(struct net_device *dev) 876static void zd1201_set_multicast(struct net_device *dev)
877{ 877{
878 struct zd1201 *zd = netdev_priv(dev); 878 struct zd1201 *zd = netdev_priv(dev);
879 struct dev_mc_list *mc; 879 struct netdev_hw_addr *ha;
880 unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI]; 880 unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
881 int i; 881 int i;
882 882
@@ -884,8 +884,8 @@ static void zd1201_set_multicast(struct net_device *dev)
884 return; 884 return;
885 885
886 i = 0; 886 i = 0;
887 netdev_for_each_mc_addr(mc, dev) 887 netdev_for_each_mc_addr(ha, dev)
888 memcpy(reqbuf + i++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN); 888 memcpy(reqbuf + i++ * ETH_ALEN, ha->addr, ETH_ALEN);
889 zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf, 889 zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
890 netdev_mc_count(dev) * ETH_ALEN, 0); 890 netdev_mc_count(dev) * ETH_ALEN, 0);
891} 891}
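
This zd1201 hunk is one of many identical conversions in this series: struct dev_mc_list, with its per-entry dmi_addr/dmi_addrlen fields, gives way to struct netdev_hw_addr. The recurring shape, with a hypothetical hw_write_mc_entry() standing in for each driver's filter programming:

#include <linux/netdevice.h>

/* hypothetical hook, stands in for each driver's filter write */
static void hw_write_mc_entry(const u8 *addr, int addr_len);

static void example_set_multicast(struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        netdev_for_each_mc_addr(ha, dev) {
                /* ha->addr replaces mc->dmi_addr; the per-entry
                 * dmi_addrlen field is gone, use dev->addr_len */
                hw_write_mc_entry(ha->addr, dev->addr_len);
        }
}
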
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 16fa289ad77b..b0b666019a93 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -948,20 +948,17 @@ static void set_rx_filter_handler(struct work_struct *work)
948} 948}
949 949
950static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw, 950static u64 zd_op_prepare_multicast(struct ieee80211_hw *hw,
951 int mc_count, struct dev_addr_list *mclist) 951 struct netdev_hw_addr_list *mc_list)
952{ 952{
953 struct zd_mac *mac = zd_hw_mac(hw); 953 struct zd_mac *mac = zd_hw_mac(hw);
954 struct zd_mc_hash hash; 954 struct zd_mc_hash hash;
955 int i; 955 struct netdev_hw_addr *ha;
956 956
957 zd_mc_clear(&hash); 957 zd_mc_clear(&hash);
958 958
959 for (i = 0; i < mc_count; i++) { 959 netdev_hw_addr_list_for_each(ha, mc_list) {
960 if (!mclist) 960 dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", ha->addr);
961 break; 961 zd_mc_add_addr(&hash, ha->addr);
962 dev_dbg_f(zd_mac_dev(mac), "mc addr %pM\n", mclist->dmi_addr);
963 zd_mc_add_addr(&hash, mclist->dmi_addr);
964 mclist = mclist->next;
965 } 962 }
966 963
967 return hash.low | ((u64)hash.high << 32); 964 return hash.low | ((u64)hash.high << 32);
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 1e783ccc306e..e9381fe3baf4 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -639,7 +639,6 @@ static void xemaclite_rx_handler(struct net_device *dev)
639 } 639 }
640 640
641 skb_put(skb, len); /* Tell the skb how much data we got */ 641 skb_put(skb, len); /* Tell the skb how much data we got */
642 skb->dev = dev; /* Fill out required meta-data */
643 642
644 skb->protocol = eth_type_trans(skb, dev); 643 skb->protocol = eth_type_trans(skb, dev);
645 skb->ip_summed = CHECKSUM_NONE; 644 skb->ip_summed = CHECKSUM_NONE;
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index ede5b2436f22..efbff76a9908 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -1299,25 +1299,25 @@ static void set_rx_mode(struct net_device *dev)
1299 /* Too many to filter well, or accept all multicasts. */ 1299 /* Too many to filter well, or accept all multicasts. */
1300 iowrite16(0x000B, ioaddr + AddrMode); 1300 iowrite16(0x000B, ioaddr + AddrMode);
1301 } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */ 1301 } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
1302 struct dev_mc_list *mclist; 1302 struct netdev_hw_addr *ha;
1303 u16 hash_table[4]; 1303 u16 hash_table[4];
1304 int i; 1304 int i;
1305 1305
1306 memset(hash_table, 0, sizeof(hash_table)); 1306 memset(hash_table, 0, sizeof(hash_table));
1307 netdev_for_each_mc_addr(mclist, dev) { 1307 netdev_for_each_mc_addr(ha, dev) {
1308 unsigned int bit; 1308 unsigned int bit;
1309 1309
1310 /* Due to a bug in the early chip versions, multiple filter 1310 /* Due to a bug in the early chip versions, multiple filter
1311 slots must be set for each address. */ 1311 slots must be set for each address. */
1312 if (yp->drv_flags & HasMulticastBug) { 1312 if (yp->drv_flags & HasMulticastBug) {
1313 bit = (ether_crc_le(3, mclist->dmi_addr) >> 3) & 0x3f; 1313 bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f;
1314 hash_table[bit >> 4] |= (1 << bit); 1314 hash_table[bit >> 4] |= (1 << bit);
1315 bit = (ether_crc_le(4, mclist->dmi_addr) >> 3) & 0x3f; 1315 bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f;
1316 hash_table[bit >> 4] |= (1 << bit); 1316 hash_table[bit >> 4] |= (1 << bit);
1317 bit = (ether_crc_le(5, mclist->dmi_addr) >> 3) & 0x3f; 1317 bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f;
1318 hash_table[bit >> 4] |= (1 << bit); 1318 hash_table[bit >> 4] |= (1 << bit);
1319 } 1319 }
1320 bit = (ether_crc_le(6, mclist->dmi_addr) >> 3) & 0x3f; 1320 bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f;
1321 hash_table[bit >> 4] |= (1 << bit); 1321 hash_table[bit >> 4] |= (1 << bit);
1322 } 1322 }
1323 /* Copy the hash table to the chip. */ 1323 /* Copy the hash table to the chip. */
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 6a801dc3bf8e..904b1f3567b4 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -608,7 +608,6 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
608static void qeth_l2_set_multicast_list(struct net_device *dev) 608static void qeth_l2_set_multicast_list(struct net_device *dev)
609{ 609{
610 struct qeth_card *card = dev->ml_priv; 610 struct qeth_card *card = dev->ml_priv;
611 struct dev_addr_list *dm;
612 struct netdev_hw_addr *ha; 611 struct netdev_hw_addr *ha;
613 612
614 if (card->info.type == QETH_CARD_TYPE_OSN) 613 if (card->info.type == QETH_CARD_TYPE_OSN)
@@ -620,8 +619,8 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
620 return; 619 return;
621 qeth_l2_del_all_mc(card); 620 qeth_l2_del_all_mc(card);
622 spin_lock_bh(&card->mclock); 621 spin_lock_bh(&card->mclock);
623 for (dm = dev->mc_list; dm; dm = dm->next) 622 netdev_for_each_mc_addr(ha, dev)
624 qeth_l2_add_mc(card, dm->da_addr, 0); 623 qeth_l2_add_mc(card, ha->addr, 0);
625 624
626 netdev_for_each_uc_addr(ha, dev) 625 netdev_for_each_uc_addr(ha, dev)
627 qeth_l2_add_mc(card, ha->addr, 1); 626 qeth_l2_add_mc(card, ha->addr, 1);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fc6ca1da8b98..35b6d3d2bd73 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1929,7 +1929,7 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
1929 in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid)); 1929 in6_dev = in6_dev_get(vlan_group_get_device(card->vlangrp, vid));
1930 if (!in6_dev) 1930 if (!in6_dev)
1931 return; 1931 return;
1932 for (ifa = in6_dev->addr_list; ifa; ifa = ifa->lst_next) { 1932 list_for_each_entry(ifa, &in6_dev->addr_list, if_list) {
1933 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6); 1933 addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
1934 if (addr) { 1934 if (addr) {
1935 memcpy(&addr->u.a6.addr, &ifa->addr, 1935 memcpy(&addr->u.a6.addr, &ifa->addr,
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index f01b9b44e8aa..54c870b8c328 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -309,10 +309,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
309 * for multiple unicast MACs. 309 * for multiple unicast MACs.
310 */ 310 */
311 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); 311 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
312 dev_unicast_add(netdev, flogi_maddr); 312 dev_uc_add(netdev, flogi_maddr);
313 if (fip->spma) 313 if (fip->spma)
314 dev_unicast_add(netdev, fip->ctl_src_addr); 314 dev_uc_add(netdev, fip->ctl_src_addr);
315 dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); 315 dev_mc_add(netdev, FIP_ALL_ENODE_MACS);
316 316
317 /* 317 /*
318 * setup the receive function from ethernet driver 318 * setup the receive function from ethernet driver
@@ -395,10 +395,10 @@ void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
395 395
396 /* Delete secondary MAC addresses */ 396 /* Delete secondary MAC addresses */
397 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN); 397 memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
398 dev_unicast_delete(netdev, flogi_maddr); 398 dev_uc_del(netdev, flogi_maddr);
399 if (fip->spma) 399 if (fip->spma)
400 dev_unicast_delete(netdev, fip->ctl_src_addr); 400 dev_uc_del(netdev, fip->ctl_src_addr);
401 dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0); 401 dev_mc_del(netdev, FIP_ALL_ENODE_MACS);
402 402
403 /* Tell the LLD we are done w/ FCoE */ 403 /* Tell the LLD we are done w/ FCoE */
404 ops = netdev->netdev_ops; 404 ops = netdev->netdev_ops;
@@ -491,9 +491,9 @@ static void fcoe_update_src_mac(struct fc_lport *lport, u8 *addr)
491 491
492 rtnl_lock(); 492 rtnl_lock();
493 if (!is_zero_ether_addr(port->data_src_addr)) 493 if (!is_zero_ether_addr(port->data_src_addr))
494 dev_unicast_delete(fcoe->netdev, port->data_src_addr); 494 dev_uc_del(fcoe->netdev, port->data_src_addr);
495 if (!is_zero_ether_addr(addr)) 495 if (!is_zero_ether_addr(addr))
496 dev_unicast_add(fcoe->netdev, addr); 496 dev_uc_add(fcoe->netdev, addr);
497 memcpy(port->data_src_addr, addr, ETH_ALEN); 497 memcpy(port->data_src_addr, addr, ETH_ALEN);
498 rtnl_unlock(); 498 rtnl_unlock();
499} 499}
@@ -820,7 +820,7 @@ static void fcoe_if_destroy(struct fc_lport *lport)
820 820
821 rtnl_lock(); 821 rtnl_lock();
822 if (!is_zero_ether_addr(port->data_src_addr)) 822 if (!is_zero_ether_addr(port->data_src_addr))
823 dev_unicast_delete(netdev, port->data_src_addr); 823 dev_uc_del(netdev, port->data_src_addr);
824 rtnl_unlock(); 824 rtnl_unlock();
825 825
826 /* receives may not be stopped until after this */ 826 /* receives may not be stopped until after this */
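
The fcoe changes track a rename of the secondary-address API: dev_unicast_add()/dev_unicast_delete() become dev_uc_add()/dev_uc_del(), and dev_mc_add()/dev_mc_del() drop their length and "global" arguments, the address length now coming from dev->addr_len. A sketch of typical use, which, as in fcoe_update_src_mac() above, happens under the rtnl lock:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void example_claim_addrs(struct net_device *netdev,
                                u8 *mac, u8 *group)
{
        rtnl_lock();
        dev_uc_add(netdev, mac);        /* was dev_unicast_add(netdev, mac) */
        dev_mc_add(netdev, group);      /* was dev_mc_add(netdev, group,
                                         * ETH_ALEN, 0) */
        rtnl_unlock();
}
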
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 0ee725ced511..9eae04afa9a0 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -599,9 +599,9 @@ static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
599 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx); 599 set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
600 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock); 600 write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
601 601
602 if (sock->sk->sk_sleep && waitqueue_active(sock->sk->sk_sleep)) { 602 if (sk_sleep(sock->sk) && waitqueue_active(sk_sleep(sock->sk))) {
603 sock->sk->sk_err = EIO; 603 sock->sk->sk_err = EIO;
604 wake_up_interruptible(sock->sk->sk_sleep); 604 wake_up_interruptible(sk_sleep(sock->sk));
605 } 605 }
606 606
607 iscsi_conn_stop(cls_conn, flag); 607 iscsi_conn_stop(cls_conn, flag);
diff --git a/drivers/ssb/driver_chipcommon.c b/drivers/ssb/driver_chipcommon.c
index 9681536163ca..59c3c0fdbecd 100644
--- a/drivers/ssb/driver_chipcommon.c
+++ b/drivers/ssb/driver_chipcommon.c
@@ -370,6 +370,7 @@ u32 ssb_chipco_gpio_control(struct ssb_chipcommon *cc, u32 mask, u32 value)
370{ 370{
371 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value); 371 return chipco_write32_masked(cc, SSB_CHIPCO_GPIOCTL, mask, value);
372} 372}
373EXPORT_SYMBOL(ssb_chipco_gpio_control);
373 374
374u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value) 375u32 ssb_chipco_gpio_intmask(struct ssb_chipcommon *cc, u32 mask, u32 value)
375{ 376{
diff --git a/drivers/staging/arlan/arlan-main.c b/drivers/staging/arlan/arlan-main.c
index 88fdd53cf5d3..80284522c42b 100644
--- a/drivers/staging/arlan/arlan-main.c
+++ b/drivers/staging/arlan/arlan-main.c
@@ -1458,7 +1458,7 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
1458 !netdev_mc_empty(dev)) 1458 !netdev_mc_empty(dev))
1459 { 1459 {
1460 char hw_dst_addr[6]; 1460 char hw_dst_addr[6];
1461 struct dev_mc_list *dmi; 1461 struct netdev_hw_addr *ha;
1462 int i; 1462 int i;
1463 1463
1464 memcpy_fromio(hw_dst_addr, arlan->ultimateDestAddress, 6); 1464 memcpy_fromio(hw_dst_addr, arlan->ultimateDestAddress, 6);
@@ -1469,12 +1469,13 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
1469 printk(KERN_ERR "%s mcast 0x0100 \n", dev->name); 1469 printk(KERN_ERR "%s mcast 0x0100 \n", dev->name);
1470 else if (hw_dst_addr[1] == 0x40) 1470 else if (hw_dst_addr[1] == 0x40)
1471 printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name); 1471 printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name);
1472 netdev_for_each_mc_entry(dmi, dev) { 1472 netdev_for_each_mc_entry(ha, dev) {
1473 if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP) 1473 if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
1474 printk(KERN_ERR "%s mcl %pM\n", 1474 printk(KERN_ERR "%s mcl %pM\n",
1475 dev->name, dmi->dmi_addr); 1475 dev->name,
1476 ha->addr);
1476 for (i = 0; i < 6; i++) 1477 for (i = 0; i < 6; i++)
1477 if (dmi->dmi_addr[i] != hw_dst_addr[i]) 1478 if (ha->addr[i] != hw_dst_addr[i])
1478 break; 1479 break;
1479 if (i == 6) 1480 if (i == 6)
1480 break; 1481 break;
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index ab047f2ff72c..abc82c3dad21 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -404,7 +404,7 @@ void et131x_multicast(struct net_device *netdev)
404 struct et131x_adapter *adapter = netdev_priv(netdev); 404 struct et131x_adapter *adapter = netdev_priv(netdev);
405 uint32_t PacketFilter = 0; 405 uint32_t PacketFilter = 0;
406 unsigned long flags; 406 unsigned long flags;
407 struct dev_mc_list *mclist; 407 struct netdev_hw_addr *ha;
408 int i; 408 int i;
409 409
410 spin_lock_irqsave(&adapter->Lock, flags); 410 spin_lock_irqsave(&adapter->Lock, flags);
@@ -449,10 +449,10 @@ void et131x_multicast(struct net_device *netdev)
449 449
450 /* Set values in the private adapter struct */ 450 /* Set values in the private adapter struct */
451 i = 0; 451 i = 0;
452 netdev_for_each_mc_addr(mclist, netdev) { 452 netdev_for_each_mc_addr(ha, netdev) {
453 if (i == NIC_MAX_MCAST_LIST) 453 if (i == NIC_MAX_MCAST_LIST)
454 break; 454 break;
455 memcpy(adapter->MCList[i++], mclist->dmi_addr, ETH_ALEN); 455 memcpy(adapter->MCList[i++], ha->addr, ETH_ALEN);
456 } 456 }
457 adapter->MCAddressCount = i; 457 adapter->MCAddressCount = i;
458 458
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 7daeced317c4..bebf0fd2af85 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -1367,12 +1367,12 @@ static void slic_mcast_set_list(struct net_device *dev)
1367 struct adapter *adapter = netdev_priv(dev); 1367 struct adapter *adapter = netdev_priv(dev);
1368 int status = STATUS_SUCCESS; 1368 int status = STATUS_SUCCESS;
1369 char *addresses; 1369 char *addresses;
1370 struct dev_mc_list *mc_list; 1370 struct netdev_hw_addr *ha;
1371 1371
1372 ASSERT(adapter); 1372 ASSERT(adapter);
1373 1373
1374 netdev_for_each_mc_addr(mc_list, dev) { 1374 netdev_for_each_mc_addr(ha, dev) {
1375 addresses = (char *) &mc_list->dmi_addr; 1375 addresses = (char *) &ha->addr;
1376 status = slic_mcast_add_list(adapter, addresses); 1376 status = slic_mcast_add_list(adapter, addresses);
1377 if (status != STATUS_SUCCESS) 1377 if (status != STATUS_SUCCESS)
1378 break; 1378 break;
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index e40a2e990f4f..18f4dfed997f 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3080,7 +3080,7 @@ static void device_set_multi(struct net_device *dev) {
3080 3080
3081 PSMgmtObject pMgmt = pDevice->pMgmt; 3081 PSMgmtObject pMgmt = pDevice->pMgmt;
3082 u32 mc_filter[2]; 3082 u32 mc_filter[2];
3083 struct dev_mc_list *mclist; 3083 struct netdev_hw_addr *ha;
3084 3084
3085 3085
3086 VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode)); 3086 VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
@@ -3100,8 +3100,8 @@ static void device_set_multi(struct net_device *dev) {
3100 } 3100 }
3101 else { 3101 else {
3102 memset(mc_filter, 0, sizeof(mc_filter)); 3102 memset(mc_filter, 0, sizeof(mc_filter));
3103 netdev_for_each_mc_addr(mclist, dev) { 3103 netdev_for_each_mc_addr(ha, dev) {
3104 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 3104 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
3105 mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31)); 3105 mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
3106 } 3106 }
3107 MACvSelectPage1(pDevice->PortOffset); 3107 MACvSelectPage1(pDevice->PortOffset);
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index a8e1adbc9592..49270db98fbb 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1596,7 +1596,7 @@ static void device_set_multi(struct net_device *dev) {
1596 PSMgmtObject pMgmt = &(pDevice->sMgmtObj); 1596 PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
1597 u32 mc_filter[2]; 1597 u32 mc_filter[2];
1598 int ii; 1598 int ii;
1599 struct dev_mc_list *mclist; 1599 struct netdev_hw_addr *ha;
1600 BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff}; 1600 BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
1601 BYTE byTmpMode = 0; 1601 BYTE byTmpMode = 0;
1602 int rc; 1602 int rc;
@@ -1632,8 +1632,8 @@ static void device_set_multi(struct net_device *dev) {
1632 } 1632 }
1633 else { 1633 else {
1634 memset(mc_filter, 0, sizeof(mc_filter)); 1634 memset(mc_filter, 0, sizeof(mc_filter));
1635 netdev_for_each_mc_addr(mclist, dev) { 1635 netdev_for_each_mc_addr(ha, dev) {
1636 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26; 1636 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
1637 mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31)); 1637 mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
1638 } 1638 }
1639 for (ii = 0; ii < 4; ii++) { 1639 for (ii = 0; ii < 4; ii++) {
diff --git a/drivers/staging/wavelan/wavelan.c b/drivers/staging/wavelan/wavelan.c
index 54ca63196fdd..f44ef351647b 100644
--- a/drivers/staging/wavelan/wavelan.c
+++ b/drivers/staging/wavelan/wavelan.c
@@ -3419,7 +3419,7 @@ static void wv_82586_config(struct net_device * dev)
3419 ac_cfg_t cfg; /* Configure action */ 3419 ac_cfg_t cfg; /* Configure action */
3420 ac_ias_t ias; /* IA-setup action */ 3420 ac_ias_t ias; /* IA-setup action */
3421 ac_mcs_t mcs; /* Multicast setup */ 3421 ac_mcs_t mcs; /* Multicast setup */
3422 struct dev_mc_list *dmi; 3422 struct netdev_hw_addr *ha;
3423 3423
3424#ifdef DEBUG_CONFIG_TRACE 3424#ifdef DEBUG_CONFIG_TRACE
3425 printk(KERN_DEBUG "%s: ->wv_82586_config()\n", dev->name); 3425 printk(KERN_DEBUG "%s: ->wv_82586_config()\n", dev->name);
@@ -3531,16 +3531,16 @@ static void wv_82586_config(struct net_device * dev)
3531 3531
3532 /* Any address to set? */ 3532 /* Any address to set? */
3533 if (lp->mc_count) { 3533 if (lp->mc_count) {
3534 netdev_for_each_mc_addr(dmi, dev) 3534 netdev_for_each_mc_addr(ha, dev)
3535 outsw(PIOP1(ioaddr), (u16 *) dmi->dmi_addr, 3535 outsw(PIOP1(ioaddr), (u16 *) ha->addr,
3536 WAVELAN_ADDR_SIZE >> 1); 3536 WAVELAN_ADDR_SIZE >> 1);
3537 3537
3538#ifdef DEBUG_CONFIG_INFO 3538#ifdef DEBUG_CONFIG_INFO
3539 printk(KERN_DEBUG 3539 printk(KERN_DEBUG
3540 "%s: wv_82586_config(): set %d multicast addresses:\n", 3540 "%s: wv_82586_config(): set %d multicast addresses:\n",
3541 dev->name, lp->mc_count); 3541 dev->name, lp->mc_count);
3542 netdev_for_each_mc_addr(dmi, dev) 3542 netdev_for_each_mc_addr(ha, dev)
3543 printk(KERN_DEBUG " %pM\n", dmi->dmi_addr); 3543 printk(KERN_DEBUG " %pM\n", ha->addr);
3544#endif 3544#endif
3545 } 3545 }
3546 3546
diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 04f691d127b4..a90132a204e6 100644
--- a/drivers/staging/wavelan/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
@@ -3591,20 +3591,20 @@ wv_82593_config(struct net_device * dev)
3591 /* If roaming is enabled, join the "Beacon Request" multicast group... */ 3591 /* If roaming is enabled, join the "Beacon Request" multicast group... */
3592 /* But only if it's not in there already! */ 3592 /* But only if it's not in there already! */
3593 if(do_roaming) 3593 if(do_roaming)
3594 dev_mc_add(dev,WAVELAN_BEACON_ADDRESS, WAVELAN_ADDR_SIZE, 1); 3594 dev_mc_add(dev, WAVELAN_BEACON_ADDRESS);
3595#endif /* WAVELAN_ROAMING */ 3595#endif /* WAVELAN_ROAMING */
3596 3596
3597 /* If any multicast address to set */ 3597 /* If any multicast address to set */
3598 if(lp->mc_count) 3598 if(lp->mc_count)
3599 { 3599 {
3600 struct dev_mc_list *dmi; 3600 struct netdev_hw_addr *ha;
3601 int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count; 3601 int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count;
3602 3602
3603#ifdef DEBUG_CONFIG_INFO 3603#ifdef DEBUG_CONFIG_INFO
3604 printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n", 3604 printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n",
3605 dev->name, lp->mc_count); 3605 dev->name, lp->mc_count);
3606 netdev_for_each_mc_addr(dmi, dev) 3606 netdev_for_each_mc_addr(ha, dev)
3607 printk(KERN_DEBUG " %pM\n", dmi->dmi_addr); 3607 printk(KERN_DEBUG " %pM\n", ha->addr);
3608#endif 3608#endif
3609 3609
3610 /* Initialize adapter's ethernet multicast addresses */ 3610 /* Initialize adapter's ethernet multicast addresses */
@@ -3612,8 +3612,8 @@ wv_82593_config(struct net_device * dev)
3612 outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base)); 3612 outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
3613 outb(addrs_len & 0xff, PIOP(base)); /* byte count lsb */ 3613 outb(addrs_len & 0xff, PIOP(base)); /* byte count lsb */
3614 outb((addrs_len >> 8), PIOP(base)); /* byte count msb */ 3614 outb((addrs_len >> 8), PIOP(base)); /* byte count msb */
3615 netdev_for_each_mc_addr(dmi, dev) 3615 netdev_for_each_mc_addr(ha, dev)
3616 outsb(PIOP(base), dmi->dmi_addr, dmi->dmi_addrlen); 3616 outsb(PIOP(base), ha->addr, dev->addr_len);
3617 3617
3618 /* reset transmit DMA pointer */ 3618 /* reset transmit DMA pointer */
3619 hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET); 3619 hacr_write_slow(base, HACR_PWR_STAT | HACR_TX_DMA_RESET);
diff --git a/drivers/staging/winbond/wbusb.c b/drivers/staging/winbond/wbusb.c
index 3482eec18651..5d9499bba9cc 100644
--- a/drivers/staging/winbond/wbusb.c
+++ b/drivers/staging/winbond/wbusb.c
@@ -92,10 +92,10 @@ static int wbsoft_get_stats(struct ieee80211_hw *hw,
92 return 0; 92 return 0;
93} 93}
94 94
95static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw, int mc_count, 95static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw,
96 struct dev_addr_list *mc_list) 96 struct netdev_hw_addr_list *mc_list)
97{ 97{
98 return mc_count; 98 return netdev_hw_addr_list_count(mc_list);
99} 99}
100 100
101static void wbsoft_configure_filter(struct ieee80211_hw *dev, 101static void wbsoft_configure_filter(struct ieee80211_hw *dev,
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index 1db73ebcae28..ca8c8b134c4e 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -1050,7 +1050,7 @@ void wl_multicast( struct net_device *dev )
1050//;?seems reasonable that even an AP-only driver could afford this small additional footprint 1050//;?seems reasonable that even an AP-only driver could afford this small additional footprint
1051 1051
1052 int x; 1052 int x;
1053 struct dev_mc_list *mclist; 1053 struct netdev_hw_addr *ha;
1054 struct wl_private *lp = wl_priv(dev); 1054 struct wl_private *lp = wl_priv(dev);
1055 unsigned long flags; 1055 unsigned long flags;
1056 /*------------------------------------------------------------------------*/ 1056 /*------------------------------------------------------------------------*/
@@ -1073,9 +1073,9 @@ void wl_multicast( struct net_device *dev )
1073 1073
1074 DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev)); 1074 DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
1075 1075
1076 netdev_for_each_mc_addr(mclist, dev) 1076 netdev_for_each_mc_addr(ha, dev)
1077 DBG_PRINT( " %s (%d)\n", DbgHwAddr(mclist->dmi_addr), 1077 DBG_PRINT(" %s (%d)\n", DbgHwAddr(ha->addr),
1078 mclist->dmi_addrlen ); 1078 dev->addr_len);
1079 } 1079 }
1080#endif /* DBG */ 1080#endif /* DBG */
1081 1081
@@ -1120,9 +1120,9 @@ void wl_multicast( struct net_device *dev )
1120 lp->ltvRecord.typ = CFG_GROUP_ADDR; 1120 lp->ltvRecord.typ = CFG_GROUP_ADDR;
1121 1121
1122 x = 0; 1122 x = 0;
1123 netdev_for_each_mc_addr(mclist, dev) 1123 netdev_for_each_mc_addr(ha, dev)
1124 memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]), 1124 memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
1125 mclist->dmi_addr, ETH_ALEN); 1125 ha->addr, ETH_ALEN);
1126 DBG_PRINT( "Setting multicast list\n" ); 1126 DBG_PRINT( "Setting multicast list\n" );
1127 hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord )); 1127 hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
1128 } else { 1128 } else {
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9777583218ff..aa88911c9504 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -642,7 +642,7 @@ static struct miscdevice vhost_net_misc = {
642 &vhost_net_fops, 642 &vhost_net_fops,
643}; 643};
644 644
645int vhost_net_init(void) 645static int vhost_net_init(void)
646{ 646{
647 int r = vhost_init(); 647 int r = vhost_init();
648 if (r) 648 if (r)
@@ -659,7 +659,7 @@ err_init:
659} 659}
660module_init(vhost_net_init); 660module_init(vhost_net_init);
661 661
662void vhost_net_exit(void) 662static void vhost_net_exit(void)
663{ 663{
664 misc_deregister(&vhost_net_misc); 664 misc_deregister(&vhost_net_misc);
665 vhost_cleanup(); 665 vhost_cleanup();
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index e69d238c5af0..5c9c657ab753 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -715,8 +715,8 @@ int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
715 return 0; 715 return 0;
716} 716}
717 717
718int translate_desc(struct vhost_dev *dev, u64 addr, u32 len, 718static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
719 struct iovec iov[], int iov_size) 719 struct iovec iov[], int iov_size)
720{ 720{
721 const struct vhost_memory_region *reg; 721 const struct vhost_memory_region *reg;
722 struct vhost_memory *mem; 722 struct vhost_memory *mem;
@@ -741,7 +741,7 @@ int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
741 _iov = iov + ret; 741 _iov = iov + ret;
742 size = reg->memory_size - addr + reg->guest_phys_addr; 742 size = reg->memory_size - addr + reg->guest_phys_addr;
743 _iov->iov_len = min((u64)len, size); 743 _iov->iov_len = min((u64)len, size);
744 _iov->iov_base = (void *)(unsigned long) 744 _iov->iov_base = (void __user *)(unsigned long)
745 (reg->userspace_addr + addr - reg->guest_phys_addr); 745 (reg->userspace_addr + addr - reg->guest_phys_addr);
746 s += size; 746 s += size;
747 addr += size; 747 addr += size;
@@ -995,7 +995,7 @@ void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
995 * want to notify the guest, using eventfd. */ 995 * want to notify the guest, using eventfd. */
996int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len) 996int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
997{ 997{
998 struct vring_used_elem *used; 998 struct vring_used_elem __user *used;
999 999
1000 /* The virtqueue contains a ring of used buffers. Get a pointer to the 1000 /* The virtqueue contains a ring of used buffers. Get a pointer to the
1001 * next entry in that used ring. */ 1001 * next entry in that used ring. */
@@ -1019,7 +1019,8 @@ int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1019 smp_wmb(); 1019 smp_wmb();
1020 /* Log used ring entry write. */ 1020 /* Log used ring entry write. */
1021 log_write(vq->log_base, 1021 log_write(vq->log_base,
1022 vq->log_addr + ((void *)used - (void *)vq->used), 1022 vq->log_addr +
1023 ((void __user *)used - (void __user *)vq->used),
1023 sizeof *used); 1024 sizeof *used);
1024 /* Log used index update. */ 1025 /* Log used index update. */
1025 log_write(vq->log_base, 1026 log_write(vq->log_base,
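
The __user annotations added above let sparse verify that user-space pointers are only dereferenced through the proper accessors. A minimal sketch of the pattern, using a hypothetical structure:

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo { u32 len; };

/* 'up' lives in the user address space and may only be read via
 * copy_from_user(); keeping the __user annotation (as the vhost
 * changes do for iov_base and the used ring) lets sparse catch
 * direct dereferences at build time. */
static int demo_read_len(struct demo __user *up, u32 *out)
{
	struct demo tmp;

	if (copy_from_user(&tmp, up, sizeof(tmp)))
		return -EFAULT;
	*out = tmp.len;
	return 0;
}
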
diff --git a/include/linux/caif/caif_socket.h b/include/linux/caif/caif_socket.h
new file mode 100644
index 000000000000..8e5c8444a3f4
--- /dev/null
+++ b/include/linux/caif/caif_socket.h
@@ -0,0 +1,164 @@
1/* linux/caif_socket.h
2 * CAIF Definitions for CAIF socket and network layer
3 * Copyright (C) ST-Ericsson AB 2010
4 * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#ifndef _LINUX_CAIF_SOCKET_H
9#define _LINUX_CAIF_SOCKET_H
10
11#include <linux/types.h>
12
13#ifdef __KERNEL__
14#include <linux/socket.h>
15#else
16#include <sys/socket.h>
17#endif
18
19
20/**
21 * enum caif_link_selector - Physical Link Selection.
22 * @CAIF_LINK_HIGH_BANDW: Physical interface for high-bandwidth
23 * traffic.
24 * @CAIF_LINK_LOW_LATENCY: Physical interface for low-latency
25 * traffic.
26 *
27 * CAIF Link Layers can register their link properties.
 28 * This enum is used to choose a CAIF Link Layer when setting up
 29 * CAIF Channels and multiple CAIF Link Layers exist.
30 */
31enum caif_link_selector {
32 CAIF_LINK_HIGH_BANDW,
33 CAIF_LINK_LOW_LATENCY
34};
35
36/**
37 * enum caif_channel_priority - CAIF channel priorities.
38 *
39 * @CAIF_PRIO_MIN: Min priority for a channel.
40 * @CAIF_PRIO_LOW: Low-priority channel.
41 * @CAIF_PRIO_NORMAL: Normal/default priority level.
 42 * @CAIF_PRIO_HIGH: High priority level.
 43 * @CAIF_PRIO_MAX: Max priority for a channel.
44 *
45 * Priority can be set on CAIF Channels in order to
46 * prioritize between traffic on different CAIF Channels.
47 * These priority levels are recommended, but the priority value
 48 * is not restricted to the values defined in this enum; any value
 49 * between CAIF_PRIO_MIN and CAIF_PRIO_MAX may be used.
50 */
51enum caif_channel_priority {
52 CAIF_PRIO_MIN = 0x01,
53 CAIF_PRIO_LOW = 0x04,
54 CAIF_PRIO_NORMAL = 0x0f,
55 CAIF_PRIO_HIGH = 0x14,
56 CAIF_PRIO_MAX = 0x1F
57};
58
59/**
60 * enum caif_protocol_type - CAIF Channel type.
61 * @CAIFPROTO_AT: Classic AT channel.
62 * @CAIFPROTO_DATAGRAM: Datagram channel.
63 * @CAIFPROTO_DATAGRAM_LOOP: Datagram loopback channel, used for testing.
64 * @CAIFPROTO_UTIL: Utility (Psock) channel.
65 * @CAIFPROTO_RFM: Remote File Manager
66 *
 67 * This enum defines the CAIF Channel type to be used, which selects
 68 * the service to connect to on the modem.
69 */
70enum caif_protocol_type {
71 CAIFPROTO_AT,
72 CAIFPROTO_DATAGRAM,
73 CAIFPROTO_DATAGRAM_LOOP,
74 CAIFPROTO_UTIL,
75 CAIFPROTO_RFM,
76 _CAIFPROTO_MAX
77};
78#define CAIFPROTO_MAX _CAIFPROTO_MAX
79
80/**
81 * enum caif_at_type - AT Service Endpoint
82 * @CAIF_ATTYPE_PLAIN: Connects to a plain vanilla AT channel.
83 */
84enum caif_at_type {
85 CAIF_ATTYPE_PLAIN = 2
86};
87
88/**
89 * struct sockaddr_caif - the sockaddr structure for CAIF sockets.
90 * @u: Union of address data 'switched' by family.
91 * :
92 * @u.at: Applies when family = CAIFPROTO_AT.
93 *
94 * @u.at.type: Type of AT link to set up (enum caif_at_type).
95 *
96 * @u.util: Applies when family = CAIFPROTO_UTIL
97 *
98 * @u.util.service: Utility service name.
99 *
100 * @u.dgm: Applies when family = CAIFPROTO_DATAGRAM
101 *
102 * @u.dgm.connection_id: Datagram connection id.
103 *
104 * @u.dgm.nsapi: NSAPI of the PDP-Context.
105 *
106 * @u.rfm: Applies when family = CAIFPROTO_RFM
107 *
108 * @u.rfm.connection_id: Connection ID for RFM.
109 *
110 * @u.rfm.volume: Volume to mount.
111 *
112 * Description:
113 * This structure holds the connect parameters used for setting up a
114 * CAIF Channel. It defines the service to connect to on the modem.
115 */
116struct sockaddr_caif {
117 sa_family_t family;
118 union {
119 struct {
120 __u8 type; /* type: enum caif_at_type */
121 } at; /* CAIFPROTO_AT */
122 struct {
123 char service[16];
124 } util; /* CAIFPROTO_UTIL */
125 union {
126 __u32 connection_id;
127 __u8 nsapi;
128 } dgm; /* CAIFPROTO_DATAGRAM(_LOOP)*/
129 struct {
130 __u32 connection_id;
131 char volume[16];
132 } rfm; /* CAIFPROTO_RFM */
133 } u;
134};
135
136/**
137 * enum caif_socket_opts - CAIF option values for getsockopt and setsockopt.
138 *
139 * @CAIFSO_LINK_SELECT: Selector used if multiple CAIF Link layers are
140 * available. Either a high bandwidth
141 * link can be selected (CAIF_LINK_HIGH_BANDW) or
 142 * a low latency link (CAIF_LINK_LOW_LATENCY).
143 * This option is of type __u32.
144 * Alternatively SO_BINDTODEVICE can be used.
145 *
146 * @CAIFSO_REQ_PARAM: Used to set the request parameters for a
147 * utility channel. (maximum 256 bytes). This
148 * option must be set before connecting.
149 *
150 * @CAIFSO_RSP_PARAM: Gets the response parameters for a utility
151 * channel. (maximum 256 bytes). This option
152 * is valid after a successful connect.
153 *
154 *
155 * This enum defines the CAIF Socket options to be used on a socket
156 *
157 */
158enum caif_socket_opts {
159 CAIFSO_LINK_SELECT = 127,
160 CAIFSO_REQ_PARAM = 128,
161 CAIFSO_RSP_PARAM = 129,
162};
163
164#endif /* _LINUX_CAIF_SOCKET_H */
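
For reference, a minimal user-space sketch of connecting an AT channel with this header. The AF_CAIF constant is defined outside this header, so the fallback value below is an assumption:

#include <sys/socket.h>
#include <string.h>
#include <linux/caif/caif_socket.h>

#ifndef AF_CAIF
#define AF_CAIF 37	/* assumption: the real value comes from linux/socket.h */
#endif

int caif_at_connect(void)
{
	struct sockaddr_caif addr;
	int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);

	if (fd < 0)
		return -1;
	memset(&addr, 0, sizeof(addr));
	addr.family = AF_CAIF;
	addr.u.at.type = CAIF_ATTYPE_PLAIN;
	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;
	return fd;
}
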
diff --git a/include/linux/caif/if_caif.h b/include/linux/caif/if_caif.h
new file mode 100644
index 000000000000..5e7eed4edf51
--- /dev/null
+++ b/include/linux/caif/if_caif.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef IF_CAIF_H_
8#define IF_CAIF_H_
9#include <linux/sockios.h>
10#include <linux/types.h>
11#include <linux/socket.h>
12
13/**
 14 * enum ifla_caif - CAIF RT Netlink parameters.
15 * @IFLA_CAIF_IPV4_CONNID: Connection ID for IPv4 PDP Context.
16 * The type of attribute is NLA_U32.
17 * @IFLA_CAIF_IPV6_CONNID: Connection ID for IPv6 PDP Context.
18 * The type of attribute is NLA_U32.
 19 * @IFLA_CAIF_LOOPBACK: If different from zero, the device is doing loopback.
20 * The type of attribute is NLA_U8.
21 *
22 * When using RT Netlink to create, destroy or configure a CAIF IP interface,
23 * enum ifla_caif is used to specify the configuration attributes.
24 */
25enum ifla_caif {
26 __IFLA_CAIF_UNSPEC,
27 IFLA_CAIF_IPV4_CONNID,
28 IFLA_CAIF_IPV6_CONNID,
29 IFLA_CAIF_LOOPBACK,
30 __IFLA_CAIF_MAX
31};
32#define IFLA_CAIF_MAX (__IFLA_CAIF_MAX-1)
33
34#endif /*IF_CAIF_H_*/
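
As a sketch of how a netlink handler might emit these attributes; the caller-supplied values are placeholders and the function name is hypothetical:

#include <net/netlink.h>

static int caif_fill_info_sketch(struct sk_buff *skb, u32 ipv4_connid,
				 u8 loopback)
{
	if (nla_put_u32(skb, IFLA_CAIF_IPV4_CONNID, ipv4_connid))
		return -EMSGSIZE;
	if (nla_put_u8(skb, IFLA_CAIF_LOOPBACK, loopback))
		return -EMSGSIZE;
	return 0;
}
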
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 6e5a7f00223d..cc0bb4961669 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -14,6 +14,7 @@
14#ifndef CAN_DEV_H 14#ifndef CAN_DEV_H
15#define CAN_DEV_H 15#define CAN_DEV_H
16 16
17#include <linux/can.h>
17#include <linux/can/netlink.h> 18#include <linux/can/netlink.h>
18#include <linux/can/error.h> 19#include <linux/can/error.h>
19 20
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
index 1448177d86d5..dba28268e651 100644
--- a/include/linux/can/platform/mcp251x.h
+++ b/include/linux/can/platform/mcp251x.h
@@ -26,8 +26,8 @@
26struct mcp251x_platform_data { 26struct mcp251x_platform_data {
27 unsigned long oscillator_frequency; 27 unsigned long oscillator_frequency;
28 int model; 28 int model;
29#define CAN_MCP251X_MCP2510 0 29#define CAN_MCP251X_MCP2510 0x2510
30#define CAN_MCP251X_MCP2515 1 30#define CAN_MCP251X_MCP2515 0x2515
31 int (*board_specific_setup)(struct spi_device *spi); 31 int (*board_specific_setup)(struct spi_device *spi);
32 int (*transceiver_enable)(int enable); 32 int (*transceiver_enable)(int enable);
33 int (*power_enable) (int enable); 33 int (*power_enable) (int enable);
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index b33f316bb92e..276b40a16835 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -310,6 +310,7 @@ struct ethtool_perm_addr {
310enum ethtool_flags { 310enum ethtool_flags {
311 ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ 311 ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */
312 ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */ 312 ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */
313 ETH_FLAG_RXHASH = (1 << 28),
313}; 314};
314 315
315/* The following structures are for supporting RX network flow 316/* The following structures are for supporting RX network flow
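
A driver-side sketch of honoring the new flag from its ethtool set_flags hook, paired with the NETIF_F_RXHASH feature bit added later in this patch; the function itself is hypothetical:

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/errno.h>

static int demo_set_flags(struct net_device *dev, u32 data)
{
	if (data & ~(ETH_FLAG_LRO | ETH_FLAG_RXHASH))
		return -EOPNOTSUPP;	/* reject bits we do not implement */

	if (data & ETH_FLAG_RXHASH)
		dev->features |= NETIF_F_RXHASH;
	else
		dev->features &= ~NETIF_F_RXHASH;
	/* a real driver would handle ETH_FLAG_LRO here as well */
	return 0;
}
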
@@ -490,12 +491,12 @@ void ethtool_ntuple_flush(struct net_device *dev);
490 * get_ufo: Report whether UDP fragmentation offload is enabled 491 * get_ufo: Report whether UDP fragmentation offload is enabled
491 * set_ufo: Turn UDP fragmentation offload on or off 492 * set_ufo: Turn UDP fragmentation offload on or off
492 * self_test: Run specified self-tests 493 * self_test: Run specified self-tests
493 * get_strings: Return a set of strings that describe the requested objects 494 * get_strings: Return a set of strings that describe the requested objects
494 * phys_id: Identify the device 495 * phys_id: Identify the device
495 * get_stats: Return statistics about the device 496 * get_stats: Return statistics about the device
496 * get_flags: get 32-bit flags bitmap 497 * get_flags: get 32-bit flags bitmap
497 * set_flags: set 32-bit flags bitmap 498 * set_flags: set 32-bit flags bitmap
498 * 499 *
499 * Description: 500 * Description:
500 * 501 *
501 * get_settings: 502 * get_settings:
@@ -531,14 +532,20 @@ struct ethtool_ops {
531 int (*nway_reset)(struct net_device *); 532 int (*nway_reset)(struct net_device *);
532 u32 (*get_link)(struct net_device *); 533 u32 (*get_link)(struct net_device *);
533 int (*get_eeprom_len)(struct net_device *); 534 int (*get_eeprom_len)(struct net_device *);
534 int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); 535 int (*get_eeprom)(struct net_device *,
535 int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); 536 struct ethtool_eeprom *, u8 *);
537 int (*set_eeprom)(struct net_device *,
538 struct ethtool_eeprom *, u8 *);
536 int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); 539 int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *);
537 int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); 540 int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *);
538 void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); 541 void (*get_ringparam)(struct net_device *,
539 int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); 542 struct ethtool_ringparam *);
540 void (*get_pauseparam)(struct net_device *, struct ethtool_pauseparam*); 543 int (*set_ringparam)(struct net_device *,
541 int (*set_pauseparam)(struct net_device *, struct ethtool_pauseparam*); 544 struct ethtool_ringparam *);
545 void (*get_pauseparam)(struct net_device *,
546 struct ethtool_pauseparam*);
547 int (*set_pauseparam)(struct net_device *,
548 struct ethtool_pauseparam*);
542 u32 (*get_rx_csum)(struct net_device *); 549 u32 (*get_rx_csum)(struct net_device *);
543 int (*set_rx_csum)(struct net_device *, u32); 550 int (*set_rx_csum)(struct net_device *, u32);
544 u32 (*get_tx_csum)(struct net_device *); 551 u32 (*get_tx_csum)(struct net_device *);
@@ -550,21 +557,24 @@ struct ethtool_ops {
550 void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); 557 void (*self_test)(struct net_device *, struct ethtool_test *, u64 *);
551 void (*get_strings)(struct net_device *, u32 stringset, u8 *); 558 void (*get_strings)(struct net_device *, u32 stringset, u8 *);
552 int (*phys_id)(struct net_device *, u32); 559 int (*phys_id)(struct net_device *, u32);
553 void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, u64 *); 560 void (*get_ethtool_stats)(struct net_device *,
561 struct ethtool_stats *, u64 *);
554 int (*begin)(struct net_device *); 562 int (*begin)(struct net_device *);
555 void (*complete)(struct net_device *); 563 void (*complete)(struct net_device *);
556 u32 (*get_ufo)(struct net_device *); 564 u32 (*get_ufo)(struct net_device *);
557 int (*set_ufo)(struct net_device *, u32); 565 int (*set_ufo)(struct net_device *, u32);
558 u32 (*get_flags)(struct net_device *); 566 u32 (*get_flags)(struct net_device *);
559 int (*set_flags)(struct net_device *, u32); 567 int (*set_flags)(struct net_device *, u32);
560 u32 (*get_priv_flags)(struct net_device *); 568 u32 (*get_priv_flags)(struct net_device *);
561 int (*set_priv_flags)(struct net_device *, u32); 569 int (*set_priv_flags)(struct net_device *, u32);
562 int (*get_sset_count)(struct net_device *, int); 570 int (*get_sset_count)(struct net_device *, int);
563 int (*get_rxnfc)(struct net_device *, struct ethtool_rxnfc *, void *); 571 int (*get_rxnfc)(struct net_device *,
572 struct ethtool_rxnfc *, void *);
564 int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *); 573 int (*set_rxnfc)(struct net_device *, struct ethtool_rxnfc *);
565 int (*flash_device)(struct net_device *, struct ethtool_flash *); 574 int (*flash_device)(struct net_device *, struct ethtool_flash *);
566 int (*reset)(struct net_device *, u32 *); 575 int (*reset)(struct net_device *, u32 *);
567 int (*set_rx_ntuple)(struct net_device *, struct ethtool_rx_ntuple *); 576 int (*set_rx_ntuple)(struct net_device *,
577 struct ethtool_rx_ntuple *);
568 int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *); 578 int (*get_rx_ntuple)(struct net_device *, u32 stringset, void *);
569}; 579};
570#endif /* __KERNEL__ */ 580#endif /* __KERNEL__ */
@@ -576,29 +586,29 @@ struct ethtool_ops {
576#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */ 586#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers. */
577#define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */ 587#define ETHTOOL_GWOL 0x00000005 /* Get wake-on-lan options. */
578#define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */ 588#define ETHTOOL_SWOL 0x00000006 /* Set wake-on-lan options. */
579#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ 589#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */
580#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */ 590#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level. */
581#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */ 591#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation. */
582#define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */ 592#define ETHTOOL_GLINK 0x0000000a /* Get link status (ethtool_value) */
583#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ 593#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */
584#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */ 594#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data. */
585#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ 595#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */
586#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */ 596#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. */
587#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ 597#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */
588#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */ 598#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters. */
589#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ 599#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */
590#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */ 600#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. */
591#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ 601#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */
592#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ 602#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */
593#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ 603#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */
594#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ 604#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */
595#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable 605#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable
596 * (ethtool_value) */ 606 * (ethtool_value) */
597#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable 607#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable
598 * (ethtool_value). */ 608 * (ethtool_value). */
599#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. */ 609#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test. */
600#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ 610#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */
601#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ 611#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */
602#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ 612#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */
603#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ 613#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */
604#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ 614#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */
@@ -609,24 +619,24 @@ struct ethtool_ops {
609#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */ 619#define ETHTOOL_SGSO 0x00000024 /* Set GSO enable (ethtool_value) */
610#define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */ 620#define ETHTOOL_GFLAGS 0x00000025 /* Get flags bitmap(ethtool_value) */
611#define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */ 621#define ETHTOOL_SFLAGS 0x00000026 /* Set flags bitmap(ethtool_value) */
612#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */ 622#define ETHTOOL_GPFLAGS 0x00000027 /* Get driver-private flags bitmap */
613#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */ 623#define ETHTOOL_SPFLAGS 0x00000028 /* Set driver-private flags bitmap */
614 624
615#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */ 625#define ETHTOOL_GRXFH 0x00000029 /* Get RX flow hash configuration */
616#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */ 626#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
617#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */ 627#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */
618#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */ 628#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */
619#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */ 629#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */
620#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */ 630#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */
621#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */ 631#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */
622#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */ 632#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */
623#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */ 633#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */
624#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */ 634#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */
625#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */ 635#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */
626#define ETHTOOL_RESET 0x00000034 /* Reset hardware */ 636#define ETHTOOL_RESET 0x00000034 /* Reset hardware */
627#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */ 637#define ETHTOOL_SRXNTUPLE 0x00000035 /* Add an n-tuple filter to device */
628#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */ 638#define ETHTOOL_GRXNTUPLE 0x00000036 /* Get n-tuple filters from device */
629#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */ 639#define ETHTOOL_GSSET_INFO 0x00000037 /* Get string set info */
630 640
631/* compatibility with older code */ 641/* compatibility with older code */
632#define SPARC_ETH_GSET ETHTOOL_GSET 642#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -635,18 +645,18 @@ struct ethtool_ops {
635/* Indicates what features are supported by the interface. */ 645/* Indicates what features are supported by the interface. */
636#define SUPPORTED_10baseT_Half (1 << 0) 646#define SUPPORTED_10baseT_Half (1 << 0)
637#define SUPPORTED_10baseT_Full (1 << 1) 647#define SUPPORTED_10baseT_Full (1 << 1)
638#define SUPPORTED_100baseT_Half (1 << 2) 648#define SUPPORTED_100baseT_Half (1 << 2)
639#define SUPPORTED_100baseT_Full (1 << 3) 649#define SUPPORTED_100baseT_Full (1 << 3)
640#define SUPPORTED_1000baseT_Half (1 << 4) 650#define SUPPORTED_1000baseT_Half (1 << 4)
641#define SUPPORTED_1000baseT_Full (1 << 5) 651#define SUPPORTED_1000baseT_Full (1 << 5)
642#define SUPPORTED_Autoneg (1 << 6) 652#define SUPPORTED_Autoneg (1 << 6)
643#define SUPPORTED_TP (1 << 7) 653#define SUPPORTED_TP (1 << 7)
644#define SUPPORTED_AUI (1 << 8) 654#define SUPPORTED_AUI (1 << 8)
645#define SUPPORTED_MII (1 << 9) 655#define SUPPORTED_MII (1 << 9)
646#define SUPPORTED_FIBRE (1 << 10) 656#define SUPPORTED_FIBRE (1 << 10)
647#define SUPPORTED_BNC (1 << 11) 657#define SUPPORTED_BNC (1 << 11)
648#define SUPPORTED_10000baseT_Full (1 << 12) 658#define SUPPORTED_10000baseT_Full (1 << 12)
649#define SUPPORTED_Pause (1 << 13) 659#define SUPPORTED_Pause (1 << 13)
650#define SUPPORTED_Asym_Pause (1 << 14) 660#define SUPPORTED_Asym_Pause (1 << 14)
651#define SUPPORTED_2500baseX_Full (1 << 15) 661#define SUPPORTED_2500baseX_Full (1 << 15)
652#define SUPPORTED_Backplane (1 << 16) 662#define SUPPORTED_Backplane (1 << 16)
@@ -656,8 +666,8 @@ struct ethtool_ops {
656#define SUPPORTED_10000baseR_FEC (1 << 20) 666#define SUPPORTED_10000baseR_FEC (1 << 20)
657 667
658/* Indicates what features are advertised by the interface. */ 668/* Indicates what features are advertised by the interface. */
659#define ADVERTISED_10baseT_Half (1 << 0) 669#define ADVERTISED_10baseT_Half (1 << 0)
660#define ADVERTISED_10baseT_Full (1 << 1) 670#define ADVERTISED_10baseT_Full (1 << 1)
661#define ADVERTISED_100baseT_Half (1 << 2) 671#define ADVERTISED_100baseT_Half (1 << 2)
662#define ADVERTISED_100baseT_Full (1 << 3) 672#define ADVERTISED_100baseT_Full (1 << 3)
663#define ADVERTISED_1000baseT_Half (1 << 4) 673#define ADVERTISED_1000baseT_Half (1 << 4)
@@ -696,12 +706,12 @@ struct ethtool_ops {
696#define DUPLEX_FULL 0x01 706#define DUPLEX_FULL 0x01
697 707
698/* Which connector port. */ 708/* Which connector port. */
699#define PORT_TP 0x00 709#define PORT_TP 0x00
700#define PORT_AUI 0x01 710#define PORT_AUI 0x01
701#define PORT_MII 0x02 711#define PORT_MII 0x02
702#define PORT_FIBRE 0x03 712#define PORT_FIBRE 0x03
703#define PORT_BNC 0x04 713#define PORT_BNC 0x04
704#define PORT_DA 0x05 714#define PORT_DA 0x05
705#define PORT_NONE 0xef 715#define PORT_NONE 0xef
706#define PORT_OTHER 0xff 716#define PORT_OTHER 0xff
707 717
@@ -715,7 +725,7 @@ struct ethtool_ops {
715/* Enable or disable autonegotiation. If this is set to enable, 725/* Enable or disable autonegotiation. If this is set to enable,
716 * the forced link modes above are completely ignored. 726 * the forced link modes above are completely ignored.
717 */ 727 */
718#define AUTONEG_DISABLE 0x00 728#define AUTONEG_DISABLE 0x00
719#define AUTONEG_ENABLE 0x01 729#define AUTONEG_ENABLE 0x01
720 730
721/* Mode MDI or MDI-X */ 731/* Mode MDI or MDI-X */
@@ -746,8 +756,8 @@ struct ethtool_ops {
746#define AH_V6_FLOW 0x0b 756#define AH_V6_FLOW 0x0b
747#define ESP_V6_FLOW 0x0c 757#define ESP_V6_FLOW 0x0c
748#define IP_USER_FLOW 0x0d 758#define IP_USER_FLOW 0x0d
749#define IPV4_FLOW 0x10 759#define IPV4_FLOW 0x10
750#define IPV6_FLOW 0x11 760#define IPV6_FLOW 0x11
751 761
752/* L3-L4 network traffic flow hash options */ 762/* L3-L4 network traffic flow hash options */
753#define RXH_L2DA (1 << 1) 763#define RXH_L2DA (1 << 1)
diff --git a/include/linux/fib_rules.h b/include/linux/fib_rules.h
index 51da65b68b85..04a397619ebe 100644
--- a/include/linux/fib_rules.h
+++ b/include/linux/fib_rules.h
@@ -15,6 +15,14 @@
15/* try to find source address in routing lookups */ 15/* try to find source address in routing lookups */
16#define FIB_RULE_FIND_SADDR 0x00010000 16#define FIB_RULE_FIND_SADDR 0x00010000
17 17
 18/* fib_rules families. Values up to 127 are reserved for real address
 19 * families; values of 128 and above may be used arbitrarily.
20 */
21#define FIB_RULES_IPV4 AF_INET
22#define FIB_RULES_IPV6 AF_INET6
23#define FIB_RULES_DECNET AF_DECnet
24#define FIB_RULES_IPMR 128
25
18struct fib_rule_hdr { 26struct fib_rule_hdr {
19 __u8 family; 27 __u8 family;
20 __u8 dst_len; 28 __u8 dst_len;
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index b834ef6d59fa..61549b26ad6f 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -80,4 +80,12 @@ enum {
80 80
81#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1) 81#define CTRL_ATTR_MCAST_GRP_MAX (__CTRL_ATTR_MCAST_GRP_MAX - 1)
82 82
83#ifdef __KERNEL__
84
85/* All generic netlink requests are serialized by a global lock. */
86extern void genl_lock(void);
87extern void genl_unlock(void);
88
89#endif /* __KERNEL__ */
90
83#endif /* __LINUX_GENERIC_NETLINK_H */ 91#endif /* __LINUX_GENERIC_NETLINK_H */
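
A minimal sketch of the exported serialization hooks; the state-touching callee is hypothetical:

#include <linux/genetlink.h>

extern void demo_touch_state(void);	/* hypothetical */

/* genl_lock() takes the same global lock that serializes all generic
 * netlink requests, so state shared with genl handlers is safe here. */
static void demo_update_genl_state(void)
{
	genl_lock();
	demo_touch_state();
	genl_unlock();
}
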
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 19984958ab7b..1252ba1fbff5 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1211,6 +1211,8 @@ enum ieee80211_category {
1211 WLAN_CATEGORY_SA_QUERY = 8, 1211 WLAN_CATEGORY_SA_QUERY = 8,
1212 WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, 1212 WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9,
1213 WLAN_CATEGORY_WMM = 17, 1213 WLAN_CATEGORY_WMM = 17,
1214 WLAN_CATEGORY_MESH_PLINK = 30, /* Pending ANA approval */
1215 WLAN_CATEGORY_MESH_PATH_SEL = 32, /* Pending ANA approval */
1214 WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, 1216 WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126,
1215 WLAN_CATEGORY_VENDOR_SPECIFIC = 127, 1217 WLAN_CATEGORY_VENDOR_SPECIFIC = 127,
1216}; 1218};
@@ -1324,7 +1326,6 @@ enum ieee80211_back_actioncode {
1324enum ieee80211_back_parties { 1326enum ieee80211_back_parties {
1325 WLAN_BACK_RECIPIENT = 0, 1327 WLAN_BACK_RECIPIENT = 0,
1326 WLAN_BACK_INITIATOR = 1, 1328 WLAN_BACK_INITIATOR = 1,
1327 WLAN_BACK_TIMER = 2,
1328}; 1329};
1329 1330
1330/* SA Query action */ 1331/* SA Query action */
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h
index e80b7f88f7c6..6d722f41ee7c 100644
--- a/include/linux/if_arp.h
+++ b/include/linux/if_arp.h
@@ -90,6 +90,7 @@
90 90
91#define ARPHRD_PHONET 820 /* PhoNet media type */ 91#define ARPHRD_PHONET 820 /* PhoNet media type */
92#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */ 92#define ARPHRD_PHONET_PIPE 821 /* PhoNet pipe header */
93#define ARPHRD_CAIF 822 /* CAIF media type */
93 94
94#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */ 95#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */
95#define ARPHRD_NONE 0xFFFE /* zero header length */ 96#define ARPHRD_NONE 0xFFFE /* zero header length */
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 299b4121f914..bed7a4682b90 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -109,6 +109,7 @@
109#define ETH_P_TRAILER 0x001C /* Trailer switch tagging */ 109#define ETH_P_TRAILER 0x001C /* Trailer switch tagging */
110#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */ 110#define ETH_P_PHONET 0x00F5 /* Nokia Phonet frames */
111#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */ 111#define ETH_P_IEEE802154 0x00F6 /* IEEE802.15.4 frame */
112#define ETH_P_CAIF 0x00F7 /* ST-Ericsson CAIF protocol */
112 113
113/* 114/*
114 * This is an Ethernet frame header. 115 * This is an Ethernet frame header.
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index c9bf92cd7653..cfd420ba72df 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -37,6 +37,38 @@ struct rtnl_link_stats {
37 __u32 tx_compressed; 37 __u32 tx_compressed;
38}; 38};
39 39
40struct rtnl_link_stats64 {
41 __u64 rx_packets; /* total packets received */
42 __u64 tx_packets; /* total packets transmitted */
43 __u64 rx_bytes; /* total bytes received */
44 __u64 tx_bytes; /* total bytes transmitted */
45 __u64 rx_errors; /* bad packets received */
46 __u64 tx_errors; /* packet transmit problems */
47 __u64 rx_dropped; /* no space in linux buffers */
48 __u64 tx_dropped; /* no space available in linux */
49 __u64 multicast; /* multicast packets received */
50 __u64 collisions;
51
52 /* detailed rx_errors: */
53 __u64 rx_length_errors;
 54 __u64 rx_over_errors; /* receiver ring buffer overflow */
 55 __u64 rx_crc_errors; /* received packet with CRC error */
 56 __u64 rx_frame_errors; /* received frame alignment error */
 57 __u64 rx_fifo_errors; /* receiver FIFO overrun */
58 __u64 rx_missed_errors; /* receiver missed packet */
59
60 /* detailed tx_errors */
61 __u64 tx_aborted_errors;
62 __u64 tx_carrier_errors;
63 __u64 tx_fifo_errors;
64 __u64 tx_heartbeat_errors;
65 __u64 tx_window_errors;
66
67 /* for cslip etc */
68 __u64 rx_compressed;
69 __u64 tx_compressed;
70};
71
40/* The struct should be in sync with struct ifmap */ 72/* The struct should be in sync with struct ifmap */
41struct rtnl_link_ifmap { 73struct rtnl_link_ifmap {
42 __u64 mem_start; 74 __u64 mem_start;
@@ -83,6 +115,7 @@ enum {
83 IFLA_VF_VLAN, 115 IFLA_VF_VLAN,
84 IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */ 116 IFLA_VF_TX_RATE, /* TX Bandwidth Allocation */
85 IFLA_VFINFO, 117 IFLA_VFINFO,
118 IFLA_STATS64,
86 __IFLA_MAX 119 __IFLA_MAX
87}; 120};
88 121
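
A sketch of filling the new 64-bit counters from driver-private statistics; the demo_priv_stats layout is hypothetical:

#include <linux/if_link.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_priv_stats {	/* hypothetical driver counters */
	u64 rx_packets, tx_packets, rx_bytes, tx_bytes;
};

static void demo_fill_stats64(struct rtnl_link_stats64 *s,
			      const struct demo_priv_stats *p)
{
	memset(s, 0, sizeof(*s));	/* unreported fields stay zero */
	s->rx_packets = p->rx_packets;
	s->tx_packets = p->tx_packets;
	s->rx_bytes = p->rx_bytes;
	s->tx_bytes = p->tx_bytes;
}
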
diff --git a/include/linux/if_packet.h b/include/linux/if_packet.h
index aa57a5f993fc..6ac23ef1801a 100644
--- a/include/linux/if_packet.h
+++ b/include/linux/if_packet.h
@@ -47,6 +47,7 @@ struct sockaddr_ll {
47#define PACKET_TX_RING 13 47#define PACKET_TX_RING 13
48#define PACKET_LOSS 14 48#define PACKET_LOSS 14
49#define PACKET_VNET_HDR 15 49#define PACKET_VNET_HDR 15
50#define PACKET_TX_TIMESTAMP 16
50 51
51struct tpacket_stats { 52struct tpacket_stats {
52 unsigned int tp_packets; 53 unsigned int tp_packets;
diff --git a/include/linux/if_pppol2tp.h b/include/linux/if_pppol2tp.h
index c58baea4a25b..184bc5566207 100644
--- a/include/linux/if_pppol2tp.h
+++ b/include/linux/if_pppol2tp.h
@@ -2,7 +2,7 @@
2 * Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661) 2 * Linux PPP over L2TP (PPPoL2TP) Socket Implementation (RFC 2661)
3 * 3 *
4 * This file supplies definitions required by the PPP over L2TP driver 4 * This file supplies definitions required by the PPP over L2TP driver
5 * (pppol2tp.c). All version information wrt this file is located in pppol2tp.c 5 * (l2tp_ppp.c). All version information wrt this file is located in l2tp_ppp.c
6 * 6 *
7 * License: 7 * License:
8 * This program is free software; you can redistribute it and/or 8 * This program is free software; you can redistribute it and/or
@@ -35,6 +35,20 @@ struct pppol2tp_addr {
35 __u16 d_tunnel, d_session; /* For sending outgoing packets */ 35 __u16 d_tunnel, d_session; /* For sending outgoing packets */
36}; 36};
37 37
 38/* The L2TPv3 protocol changes tunnel and session IDs from 16 to 32
 39 * bits, so we need a different sockaddr structure.
40 */
41struct pppol2tpv3_addr {
42 pid_t pid; /* pid that owns the fd.
43 * 0 => current */
44 int fd; /* FD of UDP or IP socket to use */
45
46 struct sockaddr_in addr; /* IP address and port to send to */
47
48 __u32 s_tunnel, s_session; /* For matching incoming packets */
49 __u32 d_tunnel, d_session; /* For sending outgoing packets */
50};
51
38/* Socket options: 52/* Socket options:
39 * DEBUG - bitmask of debug message categories 53 * DEBUG - bitmask of debug message categories
40 * SENDSEQ - 0 => don't send packets with sequence numbers 54 * SENDSEQ - 0 => don't send packets with sequence numbers
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
index 90b5fae5d714..a6577af0c4e6 100644
--- a/include/linux/if_pppox.h
+++ b/include/linux/if_pppox.h
@@ -72,6 +72,15 @@ struct sockaddr_pppol2tp {
72 struct pppol2tp_addr pppol2tp; 72 struct pppol2tp_addr pppol2tp;
73}__attribute__ ((packed)); 73}__attribute__ ((packed));
74 74
 75/* The L2TPv3 protocol changes tunnel and session IDs from 16 to 32
 76 * bits, so we need a different sockaddr structure.
77 */
78struct sockaddr_pppol2tpv3 {
79 sa_family_t sa_family; /* address family, AF_PPPOX */
80 unsigned int sa_protocol; /* protocol identifier */
81 struct pppol2tpv3_addr pppol2tp;
82} __attribute__ ((packed));
83
75/********************************************************************* 84/*********************************************************************
76 * 85 *
77 * ioctl interface for defining forwarding of connections 86 * ioctl interface for defining forwarding of connections
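
For reference, a user-space sketch of creating a PPPoL2TPv3 session socket with the new address structure; the PX_PROTO_OL2TP constant and the surrounding tunnel setup are assumed to match the existing PPPoL2TP flow:

#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <linux/if_pppox.h>

int pppol2tpv3_connect(int tunnel_fd, struct sockaddr_in *peer,
		       __u32 tid, __u32 sid, __u32 peer_tid, __u32 peer_sid)
{
	struct sockaddr_pppol2tpv3 sax;
	int fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);

	if (fd < 0)
		return -1;
	memset(&sax, 0, sizeof(sax));
	sax.sa_family = AF_PPPOX;
	sax.sa_protocol = PX_PROTO_OL2TP;
	sax.pppol2tp.fd = tunnel_fd;		/* managed UDP or IP socket */
	sax.pppol2tp.addr = *peer;
	sax.pppol2tp.s_tunnel = tid;		/* 32-bit ids in L2TPv3 */
	sax.pppol2tp.s_session = sid;
	sax.pppol2tp.d_tunnel = peer_tid;
	sax.pppol2tp.d_session = peer_sid;
	if (connect(fd, (struct sockaddr *)&sax, sizeof(sax)) < 0)
		return -1;
	return fd;
}
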
diff --git a/include/linux/l2tp.h b/include/linux/l2tp.h
new file mode 100644
index 000000000000..4bdb31df8e72
--- /dev/null
+++ b/include/linux/l2tp.h
@@ -0,0 +1,163 @@
1/*
2 * L2TP-over-IP socket for L2TPv3.
3 *
4 * Author: James Chapman <jchapman@katalix.com>
5 */
6
7#ifndef _LINUX_L2TP_H_
8#define _LINUX_L2TP_H_
9
10#include <linux/types.h>
11#ifdef __KERNEL__
12#include <linux/socket.h>
13#include <linux/in.h>
14#else
15#include <netinet/in.h>
16#endif
17
18#define IPPROTO_L2TP 115
19
20/**
21 * struct sockaddr_l2tpip - the sockaddr structure for L2TP-over-IP sockets
22 * @l2tp_family: address family number AF_L2TPIP.
23 * @l2tp_addr: protocol specific address information
24 * @l2tp_conn_id: connection id of tunnel
25 */
26#define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */
27struct sockaddr_l2tpip {
28 /* The first fields must match struct sockaddr_in */
29 sa_family_t l2tp_family; /* AF_INET */
30 __be16 l2tp_unused; /* INET port number (unused) */
31 struct in_addr l2tp_addr; /* Internet address */
32
33 __u32 l2tp_conn_id; /* Connection ID of tunnel */
34
35 /* Pad to size of `struct sockaddr'. */
36 unsigned char __pad[sizeof(struct sockaddr) - sizeof(sa_family_t) -
37 sizeof(__be16) - sizeof(struct in_addr) -
38 sizeof(__u32)];
39};
40
41/*****************************************************************************
42 * NETLINK_GENERIC netlink family.
43 *****************************************************************************/
44
45/*
46 * Commands.
 47 * Valid TLVs of each command are:
48 * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum, vlanid
49 * TUNNEL_DELETE - CONN_ID
50 * TUNNEL_MODIFY - CONN_ID, udpcsum
51 * TUNNEL_GETSTATS - CONN_ID, (stats)
52 * TUNNEL_GET - CONN_ID, (...)
 53 * SESSION_CREATE - SESSION_ID, PW_TYPE, offset, data_seq, cookie, peer_cookie, l2spec
54 * SESSION_DELETE - SESSION_ID
55 * SESSION_MODIFY - SESSION_ID, data_seq
56 * SESSION_GET - SESSION_ID, (...)
57 * SESSION_GETSTATS - SESSION_ID, (stats)
58 *
59 */
60enum {
61 L2TP_CMD_NOOP,
62 L2TP_CMD_TUNNEL_CREATE,
63 L2TP_CMD_TUNNEL_DELETE,
64 L2TP_CMD_TUNNEL_MODIFY,
65 L2TP_CMD_TUNNEL_GET,
66 L2TP_CMD_SESSION_CREATE,
67 L2TP_CMD_SESSION_DELETE,
68 L2TP_CMD_SESSION_MODIFY,
69 L2TP_CMD_SESSION_GET,
70 __L2TP_CMD_MAX,
71};
72
73#define L2TP_CMD_MAX (__L2TP_CMD_MAX - 1)
74
75/*
76 * ATTR types defined for L2TP
77 */
78enum {
79 L2TP_ATTR_NONE, /* no data */
80 L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */
81 L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */
82 L2TP_ATTR_OFFSET, /* u16 */
83 L2TP_ATTR_DATA_SEQ, /* u16 */
84 L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */
 85 L2TP_ATTR_L2SPEC_LEN, /* u8 */
86 L2TP_ATTR_PROTO_VERSION, /* u8 */
87 L2TP_ATTR_IFNAME, /* string */
88 L2TP_ATTR_CONN_ID, /* u32 */
89 L2TP_ATTR_PEER_CONN_ID, /* u32 */
90 L2TP_ATTR_SESSION_ID, /* u32 */
91 L2TP_ATTR_PEER_SESSION_ID, /* u32 */
92 L2TP_ATTR_UDP_CSUM, /* u8 */
93 L2TP_ATTR_VLAN_ID, /* u16 */
94 L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
95 L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
96 L2TP_ATTR_DEBUG, /* u32 */
97 L2TP_ATTR_RECV_SEQ, /* u8 */
98 L2TP_ATTR_SEND_SEQ, /* u8 */
99 L2TP_ATTR_LNS_MODE, /* u8 */
100 L2TP_ATTR_USING_IPSEC, /* u8 */
101 L2TP_ATTR_RECV_TIMEOUT, /* msec */
102 L2TP_ATTR_FD, /* int */
103 L2TP_ATTR_IP_SADDR, /* u32 */
104 L2TP_ATTR_IP_DADDR, /* u32 */
105 L2TP_ATTR_UDP_SPORT, /* u16 */
106 L2TP_ATTR_UDP_DPORT, /* u16 */
107 L2TP_ATTR_MTU, /* u16 */
108 L2TP_ATTR_MRU, /* u16 */
109 L2TP_ATTR_STATS, /* nested */
110 __L2TP_ATTR_MAX,
111};
112
113#define L2TP_ATTR_MAX (__L2TP_ATTR_MAX - 1)
114
115/* Nested in L2TP_ATTR_STATS */
116enum {
117 L2TP_ATTR_STATS_NONE, /* no data */
118 L2TP_ATTR_TX_PACKETS, /* u64 */
119 L2TP_ATTR_TX_BYTES, /* u64 */
120 L2TP_ATTR_TX_ERRORS, /* u64 */
121 L2TP_ATTR_RX_PACKETS, /* u64 */
122 L2TP_ATTR_RX_BYTES, /* u64 */
123 L2TP_ATTR_RX_SEQ_DISCARDS, /* u64 */
124 L2TP_ATTR_RX_OOS_PACKETS, /* u64 */
125 L2TP_ATTR_RX_ERRORS, /* u64 */
126 __L2TP_ATTR_STATS_MAX,
127};
128
129#define L2TP_ATTR_STATS_MAX (__L2TP_ATTR_STATS_MAX - 1)
130
131enum l2tp_pwtype {
132 L2TP_PWTYPE_NONE = 0x0000,
133 L2TP_PWTYPE_ETH_VLAN = 0x0004,
134 L2TP_PWTYPE_ETH = 0x0005,
135 L2TP_PWTYPE_PPP = 0x0007,
136 L2TP_PWTYPE_PPP_AC = 0x0008,
137 L2TP_PWTYPE_IP = 0x000b,
138 __L2TP_PWTYPE_MAX
139};
140
141enum l2tp_l2spec_type {
142 L2TP_L2SPECTYPE_NONE,
143 L2TP_L2SPECTYPE_DEFAULT,
144};
145
146enum l2tp_encap_type {
147 L2TP_ENCAPTYPE_UDP,
148 L2TP_ENCAPTYPE_IP,
149};
150
151enum l2tp_seqmode {
152 L2TP_SEQ_NONE = 0,
153 L2TP_SEQ_IP = 1,
154 L2TP_SEQ_ALL = 2,
155};
156
157/*
158 * NETLINK_GENERIC related info
159 */
160#define L2TP_GENL_NAME "l2tp"
161#define L2TP_GENL_VERSION 0x1
162
 163#endif /* _LINUX_L2TP_H_ */
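
A user-space sketch of binding an L2TP-over-IP socket with sockaddr_l2tpip; using SOCK_DGRAM with IPPROTO_L2TP on an AF_INET socket is an assumption based on the comment that the leading fields must match struct sockaddr_in:

#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <linux/l2tp.h>

int l2tpip_bind(__u32 conn_id, struct in_addr local)
{
	struct sockaddr_l2tpip sa;
	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);

	if (fd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.l2tp_family = AF_INET;
	sa.l2tp_addr = local;
	sa.l2tp_conn_id = conn_id;	/* incoming tunnel is matched on this */
	if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return -1;
	return fd;
}
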
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index 0ebaef577ff5..329a8faa6e37 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -94,6 +94,8 @@
94 94
95#define SDIO_BUS_WIDTH_1BIT 0x00 95#define SDIO_BUS_WIDTH_1BIT 0x00
96#define SDIO_BUS_WIDTH_4BIT 0x02 96#define SDIO_BUS_WIDTH_4BIT 0x02
97#define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */
98#define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */
97 99
98#define SDIO_BUS_ASYNC_INT 0x20 100#define SDIO_BUS_ASYNC_INT 0x20
99 101
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index f58e9d836f32..55f1f9c9506c 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -474,4 +474,30 @@ struct platform_device_id {
474 __attribute__((aligned(sizeof(kernel_ulong_t)))); 474 __attribute__((aligned(sizeof(kernel_ulong_t))));
475}; 475};
476 476
477#define MDIO_MODULE_PREFIX "mdio:"
478
479#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
480#define MDIO_ID_ARGS(_id) \
481 (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
482 ((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
483 ((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
484 ((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
485 ((_id)>>15) & 1, ((_id)>>14) & 1, ((_id)>>13) & 1, ((_id)>>12) & 1, \
486 ((_id)>>11) & 1, ((_id)>>10) & 1, ((_id)>>9) & 1, ((_id)>>8) & 1, \
487 ((_id)>>7) & 1, ((_id)>>6) & 1, ((_id)>>5) & 1, ((_id)>>4) & 1, \
488 ((_id)>>3) & 1, ((_id)>>2) & 1, ((_id)>>1) & 1, (_id) & 1
489
490/**
491 * struct mdio_device_id - identifies PHY devices on an MDIO/MII bus
492 * @phy_id: The result of
 493 * (mdio_read(&MII_PHYSID1) << 16 | mdio_read(&MII_PHYSID2)) & @phy_id_mask
494 * for this PHY type
495 * @phy_id_mask: Defines the significant bits of @phy_id. A value of 0
496 * is used to terminate an array of struct mdio_device_id.
497 */
498struct mdio_device_id {
499 __u32 phy_id;
500 __u32 phy_id_mask;
501};
502
477#endif /* LINUX_MOD_DEVICETABLE_H */ 503#endif /* LINUX_MOD_DEVICETABLE_H */
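
A sketch of a PHY driver using the new table for module autoloading; the IDs below are placeholders, not a real PHY, and MODULE_DEVICE_TABLE support for the mdio bus type is assumed:

#include <linux/mod_devicetable.h>
#include <linux/module.h>

static struct mdio_device_id demo_phy_tbl[] = {
	{ 0x01410c80, 0xfffffff0 },	/* placeholder phy_id/phy_id_mask */
	{ }				/* zero mask terminates the array */
};
MODULE_DEVICE_TABLE(mdio, demo_phy_tbl);
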
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index c5f3d53548e2..fa04b246c9ae 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -27,7 +27,8 @@
27#define MRT_DEL_MFC (MRT_BASE+5) /* Delete a multicast forwarding entry */ 27#define MRT_DEL_MFC (MRT_BASE+5) /* Delete a multicast forwarding entry */
28#define MRT_VERSION (MRT_BASE+6) /* Get the kernel multicast version */ 28#define MRT_VERSION (MRT_BASE+6) /* Get the kernel multicast version */
29#define MRT_ASSERT (MRT_BASE+7) /* Activate PIM assert mode */ 29#define MRT_ASSERT (MRT_BASE+7) /* Activate PIM assert mode */
30#define MRT_PIM (MRT_BASE+8) /* enable PIM code */ 30#define MRT_PIM (MRT_BASE+8) /* enable PIM code */
31#define MRT_TABLE (MRT_BASE+9) /* Specify mroute table ID */
31 32
32#define SIOCGETVIFCNT SIOCPROTOPRIVATE /* IP protocol privates */ 33#define SIOCGETVIFCNT SIOCPROTOPRIVATE /* IP protocol privates */
33#define SIOCGETSGCNT (SIOCPROTOPRIVATE+1) 34#define SIOCGETSGCNT (SIOCPROTOPRIVATE+1)
@@ -191,10 +192,7 @@ struct vif_device {
191#define VIFF_STATIC 0x8000 192#define VIFF_STATIC 0x8000
192 193
193struct mfc_cache { 194struct mfc_cache {
194 struct mfc_cache *next; /* Next entry on cache line */ 195 struct list_head list;
195#ifdef CONFIG_NET_NS
196 struct net *mfc_net;
197#endif
198 __be32 mfc_mcastgrp; /* Group the entry belongs to */ 196 __be32 mfc_mcastgrp; /* Group the entry belongs to */
199 __be32 mfc_origin; /* Source of packet */ 197 __be32 mfc_origin; /* Source of packet */
200 vifi_t mfc_parent; /* Source interface */ 198 vifi_t mfc_parent; /* Source interface */
@@ -217,18 +215,6 @@ struct mfc_cache {
217 } mfc_un; 215 } mfc_un;
218}; 216};
219 217
220static inline
221struct net *mfc_net(const struct mfc_cache *mfc)
222{
223 return read_pnet(&mfc->mfc_net);
224}
225
226static inline
227void mfc_net_set(struct mfc_cache *mfc, struct net *net)
228{
229 write_pnet(&mfc->mfc_net, hold_net(net));
230}
231
232#define MFC_STATIC 1 218#define MFC_STATIC 1
233#define MFC_NOTIFY 2 219#define MFC_NOTIFY 2
234 220
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fa8b47637997..3c5ed5f5274e 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -223,29 +223,11 @@ struct netif_rx_stats {
223 unsigned dropped; 223 unsigned dropped;
224 unsigned time_squeeze; 224 unsigned time_squeeze;
225 unsigned cpu_collision; 225 unsigned cpu_collision;
226 unsigned received_rps;
226}; 227};
227 228
228DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat); 229DECLARE_PER_CPU(struct netif_rx_stats, netdev_rx_stat);
229 230
230struct dev_addr_list {
231 struct dev_addr_list *next;
232 u8 da_addr[MAX_ADDR_LEN];
233 u8 da_addrlen;
234 u8 da_synced;
235 int da_users;
236 int da_gusers;
237};
238
239/*
240 * We tag multicasts with these structures.
241 */
242
243#define dev_mc_list dev_addr_list
244#define dmi_addr da_addr
245#define dmi_addrlen da_addrlen
246#define dmi_users da_users
247#define dmi_gusers da_gusers
248
249struct netdev_hw_addr { 231struct netdev_hw_addr {
250 struct list_head list; 232 struct list_head list;
251 unsigned char addr[MAX_ADDR_LEN]; 233 unsigned char addr[MAX_ADDR_LEN];
@@ -254,8 +236,10 @@ struct netdev_hw_addr {
254#define NETDEV_HW_ADDR_T_SAN 2 236#define NETDEV_HW_ADDR_T_SAN 2
255#define NETDEV_HW_ADDR_T_SLAVE 3 237#define NETDEV_HW_ADDR_T_SLAVE 3
256#define NETDEV_HW_ADDR_T_UNICAST 4 238#define NETDEV_HW_ADDR_T_UNICAST 4
239#define NETDEV_HW_ADDR_T_MULTICAST 5
257 int refcount; 240 int refcount;
258 bool synced; 241 bool synced;
242 bool global_use;
259 struct rcu_head rcu_head; 243 struct rcu_head rcu_head;
260}; 244};
261 245
@@ -264,16 +248,20 @@ struct netdev_hw_addr_list {
264 int count; 248 int count;
265}; 249};
266 250
267#define netdev_uc_count(dev) ((dev)->uc.count) 251#define netdev_hw_addr_list_count(l) ((l)->count)
268#define netdev_uc_empty(dev) ((dev)->uc.count == 0) 252#define netdev_hw_addr_list_empty(l) (netdev_hw_addr_list_count(l) == 0)
269#define netdev_for_each_uc_addr(ha, dev) \ 253#define netdev_hw_addr_list_for_each(ha, l) \
270 list_for_each_entry(ha, &dev->uc.list, list) 254 list_for_each_entry(ha, &(l)->list, list)
271 255
272#define netdev_mc_count(dev) ((dev)->mc_count) 256#define netdev_uc_count(dev) netdev_hw_addr_list_count(&(dev)->uc)
273#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) 257#define netdev_uc_empty(dev) netdev_hw_addr_list_empty(&(dev)->uc)
258#define netdev_for_each_uc_addr(ha, dev) \
259 netdev_hw_addr_list_for_each(ha, &(dev)->uc)
274 260
275#define netdev_for_each_mc_addr(mclist, dev) \ 261#define netdev_mc_count(dev) netdev_hw_addr_list_count(&(dev)->mc)
276 for (mclist = dev->mc_list; mclist; mclist = mclist->next) 262#define netdev_mc_empty(dev) netdev_hw_addr_list_empty(&(dev)->mc)
263#define netdev_for_each_mc_addr(ha, dev) \
264 netdev_hw_addr_list_for_each(ha, &(dev)->mc)
277 265
278struct hh_cache { 266struct hh_cache {
279 struct hh_cache *hh_next; /* Next entry */ 267 struct hh_cache *hh_next; /* Next entry */
@@ -530,6 +518,85 @@ struct netdev_queue {
530 unsigned long tx_dropped; 518 unsigned long tx_dropped;
531} ____cacheline_aligned_in_smp; 519} ____cacheline_aligned_in_smp;
532 520
521#ifdef CONFIG_RPS
522/*
523 * This structure holds an RPS map which can be of variable length. The
524 * map is an array of CPUs.
525 */
526struct rps_map {
527 unsigned int len;
528 struct rcu_head rcu;
529 u16 cpus[0];
530};
531#define RPS_MAP_SIZE(_num) (sizeof(struct rps_map) + (_num * sizeof(u16)))
532
533/*
534 * The rps_dev_flow structure contains the mapping of a flow to a CPU and the
535 * tail pointer for that CPU's input queue at the time of last enqueue.
536 */
537struct rps_dev_flow {
538 u16 cpu;
539 u16 fill;
540 unsigned int last_qtail;
541};
542
543/*
544 * The rps_dev_flow_table structure contains a table of flow mappings.
545 */
546struct rps_dev_flow_table {
547 unsigned int mask;
548 struct rcu_head rcu;
549 struct work_struct free_work;
550 struct rps_dev_flow flows[0];
551};
552#define RPS_DEV_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_dev_flow_table) + \
553 (_num * sizeof(struct rps_dev_flow)))
554
555/*
556 * The rps_sock_flow_table contains mappings of flows to the last CPU
557 * on which they were processed by the application (set in recvmsg).
558 */
559struct rps_sock_flow_table {
560 unsigned int mask;
561 u16 ents[0];
562};
563#define RPS_SOCK_FLOW_TABLE_SIZE(_num) (sizeof(struct rps_sock_flow_table) + \
564 (_num * sizeof(u16)))
565
566#define RPS_NO_CPU 0xffff
567
568static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
569 u32 hash)
570{
571 if (table && hash) {
572 unsigned int cpu, index = hash & table->mask;
573
 574 /* We only give a hint; preemption can change the CPU under us */
575 cpu = raw_smp_processor_id();
576
577 if (table->ents[index] != cpu)
578 table->ents[index] = cpu;
579 }
580}
581
582static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
583 u32 hash)
584{
585 if (table && hash)
586 table->ents[hash & table->mask] = RPS_NO_CPU;
587}
588
589extern struct rps_sock_flow_table *rps_sock_flow_table;
590
591/* This structure contains an instance of an RX queue. */
592struct netdev_rx_queue {
593 struct rps_map *rps_map;
594 struct rps_dev_flow_table *rps_flow_table;
595 struct kobject kobj;
596 struct netdev_rx_queue *first;
597 atomic_t count;
598} ____cacheline_aligned_in_smp;
599#endif /* CONFIG_RPS */
533 600
534/* 601/*
535 * This structure defines the management hooks for network devices. 602 * This structure defines the management hooks for network devices.
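
The RPS size macros and helpers above combine as in this allocation sketch; the table length is a caller-chosen power of two and the function name is hypothetical:

#include <linux/vmalloc.h>

static struct rps_sock_flow_table *demo_alloc_flow_table(unsigned int size)
{
	/* size must be a power of two so 'hash & mask' can index it */
	struct rps_sock_flow_table *table =
		vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
	unsigned int i;

	if (!table)
		return NULL;
	table->mask = size - 1;
	for (i = 0; i < size; i++)
		table->ents[i] = RPS_NO_CPU;	/* no CPU recorded yet */
	return table;
}
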
@@ -764,6 +831,7 @@ struct net_device {
764#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */ 831#define NETIF_F_SCTP_CSUM (1 << 25) /* SCTP checksum offload */
765#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/ 832#define NETIF_F_FCOE_MTU (1 << 26) /* Supports max FCoE MTU, 2158 bytes*/
766#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */ 833#define NETIF_F_NTUPLE (1 << 27) /* N-tuple filters supported */
834#define NETIF_F_RXHASH (1 << 28) /* Receive hashing offload */
767 835
768 /* Segmentation offload features */ 836 /* Segmentation offload features */
769#define NETIF_F_GSO_SHIFT 16 837#define NETIF_F_GSO_SHIFT 16
@@ -840,12 +908,10 @@ struct net_device {
840 unsigned char addr_len; /* hardware address length */ 908 unsigned char addr_len; /* hardware address length */
841 unsigned short dev_id; /* for shared network cards */ 909 unsigned short dev_id; /* for shared network cards */
842 910
843 struct netdev_hw_addr_list uc; /* Secondary unicast
844 mac addresses */
845 int uc_promisc;
846 spinlock_t addr_list_lock; 911 spinlock_t addr_list_lock;
847 struct dev_addr_list *mc_list; /* Multicast mac addresses */ 912 struct netdev_hw_addr_list uc; /* Unicast mac addresses */
848 int mc_count; /* Number of installed mcasts */ 913 struct netdev_hw_addr_list mc; /* Multicast mac addresses */
914 int uc_promisc;
849 unsigned int promiscuity; 915 unsigned int promiscuity;
850 unsigned int allmulti; 916 unsigned int allmulti;
851 917
@@ -878,6 +944,15 @@ struct net_device {
878 944
879 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ 945 unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
880 946
947#ifdef CONFIG_RPS
948 struct kset *queues_kset;
949
950 struct netdev_rx_queue *_rx;
951
952 /* Number of RX queues allocated at alloc_netdev_mq() time */
953 unsigned int num_rx_queues;
954#endif
955
881 struct netdev_queue rx_queue; 956 struct netdev_queue rx_queue;
882 957
883 struct netdev_queue *_tx ____cacheline_aligned_in_smp; 958 struct netdev_queue *_tx ____cacheline_aligned_in_smp;
@@ -1306,19 +1381,34 @@ static inline int unregister_gifconf(unsigned int family)
1306} 1381}
1307 1382
1308/* 1383/*
1309 * Incoming packets are placed on per-cpu queues so that 1384 * Incoming packets are placed on per-cpu queues
1310 * no locking is needed.
1311 */ 1385 */
1312struct softnet_data { 1386struct softnet_data {
1313 struct Qdisc *output_queue; 1387 struct Qdisc *output_queue;
1314 struct sk_buff_head input_pkt_queue;
1315 struct list_head poll_list; 1388 struct list_head poll_list;
1316 struct sk_buff *completion_queue; 1389 struct sk_buff *completion_queue;
1317 1390
1391#ifdef CONFIG_RPS
1392 struct softnet_data *rps_ipi_list;
1393
1394 /* Elements below can be accessed between CPUs for RPS */
1395 struct call_single_data csd ____cacheline_aligned_in_smp;
1396 struct softnet_data *rps_ipi_next;
1397 unsigned int cpu;
1398 unsigned int input_queue_head;
1399#endif
1400 struct sk_buff_head input_pkt_queue;
1318 struct napi_struct backlog; 1401 struct napi_struct backlog;
1319}; 1402};
1320 1403
1321DECLARE_PER_CPU(struct softnet_data,softnet_data); 1404static inline void input_queue_head_incr(struct softnet_data *sd)
1405{
1406#ifdef CONFIG_RPS
1407 sd->input_queue_head++;
1408#endif
1409}
1410
1411DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
1322 1412
1323#define HAVE_NETIF_QUEUE 1413#define HAVE_NETIF_QUEUE
1324 1414
@@ -1945,6 +2035,22 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
1945extern int register_netdev(struct net_device *dev); 2035extern int register_netdev(struct net_device *dev);
1946extern void unregister_netdev(struct net_device *dev); 2036extern void unregister_netdev(struct net_device *dev);
1947 2037
2038/* General hardware address lists handling functions */
2039extern int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
2040 struct netdev_hw_addr_list *from_list,
2041 int addr_len, unsigned char addr_type);
2042extern void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
2043 struct netdev_hw_addr_list *from_list,
2044 int addr_len, unsigned char addr_type);
2045extern int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
2046 struct netdev_hw_addr_list *from_list,
2047 int addr_len);
2048extern void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
2049 struct netdev_hw_addr_list *from_list,
2050 int addr_len);
2051extern void __hw_addr_flush(struct netdev_hw_addr_list *list);
2052extern void __hw_addr_init(struct netdev_hw_addr_list *list);
2053
1948/* Functions used for device addresses handling */ 2054/* Functions used for device addresses handling */
1949extern int dev_addr_add(struct net_device *dev, unsigned char *addr, 2055extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
1950 unsigned char addr_type); 2056 unsigned char addr_type);
@@ -1956,26 +2062,34 @@ extern int dev_addr_add_multiple(struct net_device *to_dev,
1956extern int dev_addr_del_multiple(struct net_device *to_dev, 2062extern int dev_addr_del_multiple(struct net_device *to_dev,
1957 struct net_device *from_dev, 2063 struct net_device *from_dev,
1958 unsigned char addr_type); 2064 unsigned char addr_type);
2065extern void dev_addr_flush(struct net_device *dev);
2066extern int dev_addr_init(struct net_device *dev);
2067
2068/* Functions used for unicast addresses handling */
2069extern int dev_uc_add(struct net_device *dev, unsigned char *addr);
2070extern int dev_uc_del(struct net_device *dev, unsigned char *addr);
2071extern int dev_uc_sync(struct net_device *to, struct net_device *from);
2072extern void dev_uc_unsync(struct net_device *to, struct net_device *from);
2073extern void dev_uc_flush(struct net_device *dev);
2074extern void dev_uc_init(struct net_device *dev);
2075
2076/* Functions used for multicast addresses handling */
2077extern int dev_mc_add(struct net_device *dev, unsigned char *addr);
2078extern int dev_mc_add_global(struct net_device *dev, unsigned char *addr);
2079extern int dev_mc_del(struct net_device *dev, unsigned char *addr);
2080extern int dev_mc_del_global(struct net_device *dev, unsigned char *addr);
2081extern int dev_mc_sync(struct net_device *to, struct net_device *from);
2082extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
2083extern void dev_mc_flush(struct net_device *dev);
2084extern void dev_mc_init(struct net_device *dev);
1959 2085
1960/* Functions used for secondary unicast and multicast support */ 2086/* Functions used for secondary unicast and multicast support */
1961extern void dev_set_rx_mode(struct net_device *dev); 2087extern void dev_set_rx_mode(struct net_device *dev);
1962extern void __dev_set_rx_mode(struct net_device *dev); 2088extern void __dev_set_rx_mode(struct net_device *dev);
1963extern int dev_unicast_delete(struct net_device *dev, void *addr);
1964extern int dev_unicast_add(struct net_device *dev, void *addr);
1965extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
1966extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
1967extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
1968extern int dev_mc_add(struct net_device *dev, void *addr, int alen, int newonly);
1969extern int dev_mc_sync(struct net_device *to, struct net_device *from);
1970extern void dev_mc_unsync(struct net_device *to, struct net_device *from);
1971extern int __dev_addr_delete(struct dev_addr_list **list, int *count, void *addr, int alen, int all);
1972extern int __dev_addr_add(struct dev_addr_list **list, int *count, void *addr, int alen, int newonly);
1973extern int __dev_addr_sync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
1974extern void __dev_addr_unsync(struct dev_addr_list **to, int *to_count, struct dev_addr_list **from, int *from_count);
1975extern int dev_set_promiscuity(struct net_device *dev, int inc); 2089extern int dev_set_promiscuity(struct net_device *dev, int inc);
1976extern int dev_set_allmulti(struct net_device *dev, int inc); 2090extern int dev_set_allmulti(struct net_device *dev, int inc);
1977extern void netdev_state_change(struct net_device *dev); 2091extern void netdev_state_change(struct net_device *dev);
1978extern void netdev_bonding_change(struct net_device *dev, 2092extern int netdev_bonding_change(struct net_device *dev,
1979 unsigned long event); 2093 unsigned long event);
1980extern void netdev_features_change(struct net_device *dev); 2094extern void netdev_features_change(struct net_device *dev);
1981/* Load a device via the kmod */ 2095/* Load a device via the kmod */
@@ -2045,54 +2159,14 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
2045 dev->gso_max_size = size; 2159 dev->gso_max_size = size;
2046} 2160}
2047 2161
2048static inline void skb_bond_set_mac_by_master(struct sk_buff *skb, 2162extern int __skb_bond_should_drop(struct sk_buff *skb,
2049 struct net_device *master) 2163 struct net_device *master);
2050{
2051 if (skb->pkt_type == PACKET_HOST) {
2052 u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2053 2164
2054 memcpy(dest, master->dev_addr, ETH_ALEN);
2055 }
2056}
2057
2058/* On bonding slaves other than the currently active slave, suppress
2059 * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2060 * ARP on active-backup slaves with arp_validate enabled.
2061 */
2062static inline int skb_bond_should_drop(struct sk_buff *skb, 2165static inline int skb_bond_should_drop(struct sk_buff *skb,
2063 struct net_device *master) 2166 struct net_device *master)
2064{ 2167{
2065 if (master) { 2168 if (master)
2066 struct net_device *dev = skb->dev; 2169 return __skb_bond_should_drop(skb, master);
2067
2068 if (master->priv_flags & IFF_MASTER_ARPMON)
2069 dev->last_rx = jiffies;
2070
2071 if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
2072 /* Do address unmangle. The local destination address
2073 * will be always the one master has. Provides the right
2074 * functionality in a bridge.
2075 */
2076 skb_bond_set_mac_by_master(skb, master);
2077 }
2078
2079 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2080 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2081 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2082 return 0;
2083
2084 if (master->priv_flags & IFF_MASTER_ALB) {
2085 if (skb->pkt_type != PACKET_BROADCAST &&
2086 skb->pkt_type != PACKET_MULTICAST)
2087 return 0;
2088 }
2089 if (master->priv_flags & IFF_MASTER_8023AD &&
2090 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2091 return 0;
2092
2093 return 1;
2094 }
2095 }
2096 return 0; 2170 return 0;
2097} 2171}
2098 2172
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h
index 28ba20fda3e2..2ea3edeee7aa 100644
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -323,6 +323,12 @@
323 * the TX command and %NL80211_ATTR_FRAME includes the contents of the 323 * the TX command and %NL80211_ATTR_FRAME includes the contents of the
324 * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged 324 * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged
325 * the frame. 325 * the frame.
326 * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command
327 * is used to configure connection quality monitoring notification trigger
328 * levels.
329 * @NL80211_CMD_NOTIFY_CQM: Connection quality monitor notification. This
330 * command is used as an event to indicate that a trigger level was
331 * reached.
326 * 332 *
327 * @NL80211_CMD_MAX: highest used command number 333 * @NL80211_CMD_MAX: highest used command number
328 * @__NL80211_CMD_AFTER_LAST: internal use 334 * @__NL80211_CMD_AFTER_LAST: internal use
@@ -419,6 +425,9 @@ enum nl80211_commands {
419 NL80211_CMD_SET_POWER_SAVE, 425 NL80211_CMD_SET_POWER_SAVE,
420 NL80211_CMD_GET_POWER_SAVE, 426 NL80211_CMD_GET_POWER_SAVE,
421 427
428 NL80211_CMD_SET_CQM,
429 NL80211_CMD_NOTIFY_CQM,
430
422 /* add new commands above here */ 431 /* add new commands above here */
423 432
424 /* used to define NL80211_CMD_MAX below */ 433 /* used to define NL80211_CMD_MAX below */
@@ -691,6 +700,15 @@ enum nl80211_commands {
691 * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was 700 * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was
692 * acknowledged by the recipient. 701 * acknowledged by the recipient.
693 * 702 *
703 * @NL80211_ATTR_CQM: connection quality monitor configuration in a
704 * nested attribute with %NL80211_ATTR_CQM_* sub-attributes.
705 *
706 * @NL80211_ATTR_LOCAL_STATE_CHANGE: Flag attribute to indicate that a command
707 * is requesting a local authentication/association state change without
708 * invoking actual management frame exchange. This can be used with
709 * NL80211_CMD_AUTHENTICATE, NL80211_CMD_DEAUTHENTICATE,
710 * NL80211_CMD_DISASSOCIATE.
711 *
694 * @NL80211_ATTR_MAX: highest attribute number currently defined 712 * @NL80211_ATTR_MAX: highest attribute number currently defined
695 * @__NL80211_ATTR_AFTER_LAST: internal use 713 * @__NL80211_ATTR_AFTER_LAST: internal use
696 */ 714 */
@@ -842,6 +860,10 @@ enum nl80211_attrs {
842 860
843 NL80211_ATTR_PS_STATE, 861 NL80211_ATTR_PS_STATE,
844 862
863 NL80211_ATTR_CQM,
864
865 NL80211_ATTR_LOCAL_STATE_CHANGE,
866
845 /* add attributes here, update the policy in nl80211.c */ 867 /* add attributes here, update the policy in nl80211.c */
846 868
847 __NL80211_ATTR_AFTER_LAST, 869 __NL80211_ATTR_AFTER_LAST,
@@ -1583,4 +1605,40 @@ enum nl80211_ps_state {
1583 NL80211_PS_ENABLED, 1605 NL80211_PS_ENABLED,
1584}; 1606};
1585 1607
1608/**
1609 * enum nl80211_attr_cqm - connection quality monitor attributes
1610 * @__NL80211_ATTR_CQM_INVALID: invalid
1611 * @NL80211_ATTR_CQM_RSSI_THOLD: RSSI threshold in dBm. This value specifies
1612 * the threshold for the RSSI level at which an event will be sent. Zero
1613 * to disable.
1614 * @NL80211_ATTR_CQM_RSSI_HYST: RSSI hysteresis in dBm. This value specifies
1615 * the minimum amount the RSSI level must change after an event before a
1616 * new event may be issued (to reduce effects of RSSI oscillation).
1617 * @NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT: RSSI threshold event
1618 * @__NL80211_ATTR_CQM_AFTER_LAST: internal
1619 * @NL80211_ATTR_CQM_MAX: highest CQM attribute
1620 */
1621enum nl80211_attr_cqm {
1622 __NL80211_ATTR_CQM_INVALID,
1623 NL80211_ATTR_CQM_RSSI_THOLD,
1624 NL80211_ATTR_CQM_RSSI_HYST,
1625 NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
1626
1627 /* keep last */
1628 __NL80211_ATTR_CQM_AFTER_LAST,
1629 NL80211_ATTR_CQM_MAX = __NL80211_ATTR_CQM_AFTER_LAST - 1
1630};
1631
1632/**
1633 * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event
1634 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW - The RSSI level is lower than the
1635 * configured threshold
1636 * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH - The RSSI is higher than the
1637 * configured threshold
1638 */
1639enum nl80211_cqm_rssi_threshold_event {
1640 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
1641 NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
1642};
1643
1586#endif /* __LINUX_NL80211_H */ 1644#endif /* __LINUX_NL80211_H */
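
A sketch of how the new nested CQM attribute might be filled from kernel code; example_put_cqm is a hypothetical helper and the threshold/hysteresis values are caller-supplied.

static int example_put_cqm(struct sk_buff *msg, s32 rssi_thold, u32 rssi_hyst)
{
	struct nlattr *cqm;

	/* Open the nested NL80211_ATTR_CQM container. */
	cqm = nla_nest_start(msg, NL80211_ATTR_CQM);
	if (!cqm)
		return -ENOBUFS;
	/* RSSI trigger level in dBm (zero disables) and hysteresis. */
	if (nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THOLD, rssi_thold) ||
	    nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_HYST, rssi_hyst))
		return -ENOBUFS;
	nla_nest_end(msg, cqm);
	return 0;
}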
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index fee6c2f68075..9c5d3fad01f3 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -182,7 +182,10 @@ static inline int notifier_to_errno(int ret)
182 * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... 182 * VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
183 */ 183 */
184 184
185/* netdevice notifier chain */ 185/* netdevice notifier chain. Please remember to update the rtnetlink
186 * notification exclusion list in rtnetlink_event() when adding new
187 * types.
188 */
186#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */ 189#define NETDEV_UP 0x0001 /* For now you can't veto a device up/down */
187#define NETDEV_DOWN 0x0002 190#define NETDEV_DOWN 0x0002
188#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface 191#define NETDEV_REBOOT 0x0003 /* Tell a protocol stack a network interface
@@ -199,8 +202,8 @@ static inline int notifier_to_errno(int ret)
199#define NETDEV_FEAT_CHANGE 0x000B 202#define NETDEV_FEAT_CHANGE 0x000B
200#define NETDEV_BONDING_FAILOVER 0x000C 203#define NETDEV_BONDING_FAILOVER 0x000C
201#define NETDEV_PRE_UP 0x000D 204#define NETDEV_PRE_UP 0x000D
202#define NETDEV_BONDING_OLDTYPE 0x000E 205#define NETDEV_PRE_TYPE_CHANGE 0x000E
203#define NETDEV_BONDING_NEWTYPE 0x000F 206#define NETDEV_POST_TYPE_CHANGE 0x000F
204#define NETDEV_POST_INIT 0x0010 207#define NETDEV_POST_INIT 0x0010
205#define NETDEV_UNREGISTER_BATCH 0x0011 208#define NETDEV_UNREGISTER_BATCH 0x0011
206 209
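
A sketch of a netdevice notifier reacting to the renamed type-change events; the handler and helper names are illustrative.

static int example_netdev_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_PRE_TYPE_CHANGE:
		/* Veto a hardware type change while we hold state on dev. */
		if (example_has_state(dev))
			return NOTIFY_BAD;
		break;
	case NETDEV_POST_TYPE_CHANGE:
		example_resync(dev);	/* type changed, rebuild our state */
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_nb = {
	.notifier_call = example_netdev_event,
};
/* Registered with register_netdevice_notifier(&example_nb). */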
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 14d7fdf6a90a..987e111f7b11 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -24,6 +24,7 @@
24#include <linux/mii.h> 24#include <linux/mii.h>
25#include <linux/timer.h> 25#include <linux/timer.h>
26#include <linux/workqueue.h> 26#include <linux/workqueue.h>
27#include <linux/mod_devicetable.h>
27 28
28#include <asm/atomic.h> 29#include <asm/atomic.h>
29 30
@@ -81,6 +82,10 @@ typedef enum {
81 */ 82 */
82#define MII_BUS_ID_SIZE (20 - 3) 83#define MII_BUS_ID_SIZE (20 - 3)
83 84
85/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
86 IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
87#define MII_ADDR_C45 (1<<30)
88
84/* 89/*
85 * The Bus class for PHYs. Devices which provide access to 90 * The Bus class for PHYs. Devices which provide access to
86 * PHYs should register using this structure 91 * PHYs should register using this structure
@@ -127,8 +132,8 @@ int mdiobus_register(struct mii_bus *bus);
127void mdiobus_unregister(struct mii_bus *bus); 132void mdiobus_unregister(struct mii_bus *bus);
128void mdiobus_free(struct mii_bus *bus); 133void mdiobus_free(struct mii_bus *bus);
129struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); 134struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr);
130int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum); 135int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum);
131int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val); 136int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val);
132 137
133 138
134#define PHY_INTERRUPT_DISABLED 0x0 139#define PHY_INTERRUPT_DISABLED 0x0
@@ -422,7 +427,7 @@ struct phy_fixup {
422 * because the bus read/write functions may wait for an interrupt 427 * because the bus read/write functions may wait for an interrupt
423 * to conclude the operation. 428 * to conclude the operation.
424 */ 429 */
425static inline int phy_read(struct phy_device *phydev, u16 regnum) 430static inline int phy_read(struct phy_device *phydev, u32 regnum)
426{ 431{
427 return mdiobus_read(phydev->bus, phydev->addr, regnum); 432 return mdiobus_read(phydev->bus, phydev->addr, regnum);
428} 433}
@@ -437,7 +442,7 @@ static inline int phy_read(struct phy_device *phydev, u16 regnum)
437 * because the bus read/write functions may wait for an interrupt 442 * because the bus read/write functions may wait for an interrupt
438 * to conclude the operation. 443 * to conclude the operation.
439 */ 444 */
440static inline int phy_write(struct phy_device *phydev, u16 regnum, u16 val) 445static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val)
441{ 446{
442 return mdiobus_write(phydev->bus, phydev->addr, regnum, val); 447 return mdiobus_write(phydev->bus, phydev->addr, regnum, val);
443} 448}
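
A sketch of a clause-45 access through the widened u32 regnum; placing the MMD device address in bits 16-20 is an assumption based on the 21-bit addressing note above.

static int example_c45_read(struct mii_bus *bus, int prtad, int devad, u16 reg)
{
	/* MII_ADDR_C45 flags the access as clause 45; devad selects the
	 * MMD and reg the 16-bit register within it (assumed layout). */
	return mdiobus_read(bus, prtad, MII_ADDR_C45 | (devad << 16) | reg);
}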
diff --git a/include/linux/ppp_channel.h b/include/linux/ppp_channel.h
index 0d3fa63e90ea..bff98ec1bfed 100644
--- a/include/linux/ppp_channel.h
+++ b/include/linux/ppp_channel.h
@@ -72,6 +72,9 @@ extern int ppp_channel_index(struct ppp_channel *);
72/* Get the unit number associated with a channel, or -1 if none */ 72/* Get the unit number associated with a channel, or -1 if none */
73extern int ppp_unit_number(struct ppp_channel *); 73extern int ppp_unit_number(struct ppp_channel *);
74 74
75/* Get the device name associated with a channel, or NULL if none */
76extern char *ppp_dev_name(struct ppp_channel *);
77
75/* 78/*
76 * SMP locking notes: 79 * SMP locking notes:
77 * The channel code must ensure that when it calls ppp_unregister_channel, 80 * The channel code must ensure that when it calls ppp_unregister_channel,
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 2c9b46cff3d7..004908b104d5 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -428,5 +428,18 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev,
428 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ 428 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
429 pos = rcu_dereference_raw(pos->next)) 429 pos = rcu_dereference_raw(pos->next))
430 430
431/**
432 * hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
433 * @tpos: the type * to use as a loop cursor.
434 * @pos: the &struct hlist_node to use as a loop cursor.
435 * @member: the name of the hlist_node within the struct.
436 */
437#define hlist_for_each_entry_continue_rcu(tpos, pos, member) \
438 for (pos = rcu_dereference((pos)->next); \
439 pos && ({ prefetch(pos->next); 1; }) && \
440 ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \
441 pos = rcu_dereference(pos->next))
442
443
431#endif /* __KERNEL__ */ 444#endif /* __KERNEL__ */
432#endif 445#endif
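
A sketch of the new continue-iterator; the example_entry structure and matching logic are illustrative.

struct example_entry {
	struct hlist_node node;
	int id;
};

/* Resume an RCU hlist walk after 'pos' (caller holds rcu_read_lock()). */
static struct example_entry *example_next_match(struct hlist_node *pos, int id)
{
	struct example_entry *e;

	hlist_for_each_entry_continue_rcu(e, pos, node)
		if (e->id == id)
			return e;
	return NULL;
}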
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 124f90cd5a38..82f5116a89e4 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -294,6 +294,7 @@ typedef unsigned char *sk_buff_data_t;
294 * @nfct_reasm: netfilter conntrack re-assembly pointer 294 * @nfct_reasm: netfilter conntrack re-assembly pointer
295 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c 295 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
296 * @skb_iif: ifindex of device we arrived on 296 * @skb_iif: ifindex of device we arrived on
297 * @rxhash: the packet hash computed on receive
297 * @queue_mapping: Queue mapping for multiqueue devices 298 * @queue_mapping: Queue mapping for multiqueue devices
298 * @tc_index: Traffic control index 299 * @tc_index: Traffic control index
299 * @tc_verd: traffic control verdict 300 * @tc_verd: traffic control verdict
@@ -369,6 +370,8 @@ struct sk_buff {
369#endif 370#endif
370#endif 371#endif
371 372
373 __u32 rxhash;
374
372 kmemcheck_bitfield_begin(flags2); 375 kmemcheck_bitfield_begin(flags2);
373 __u16 queue_mapping:16; 376 __u16 queue_mapping:16;
374#ifdef CONFIG_IPV6_NDISC_NODETYPE 377#ifdef CONFIG_IPV6_NDISC_NODETYPE
@@ -467,11 +470,6 @@ extern int skb_cow_data(struct sk_buff *skb, int tailbits,
467 struct sk_buff **trailer); 470 struct sk_buff **trailer);
468extern int skb_pad(struct sk_buff *skb, int pad); 471extern int skb_pad(struct sk_buff *skb, int pad);
469#define dev_kfree_skb(a) consume_skb(a) 472#define dev_kfree_skb(a) consume_skb(a)
470#define dev_consume_skb(a) kfree_skb_clean(a)
471extern void skb_over_panic(struct sk_buff *skb, int len,
472 void *here);
473extern void skb_under_panic(struct sk_buff *skb, int len,
474 void *here);
475 473
476extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb, 474extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
477 int getfrag(void *from, char *to, int offset, 475 int getfrag(void *from, char *to, int offset,
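
A sketch of a driver receive path recording the hardware RSS hash in the new rxhash field; the function name and hash source are made up.

static void example_rx(struct sk_buff *skb, u32 hw_rss_hash)
{
	/* Store the packet hash computed by the NIC on receive. */
	skb->rxhash = hw_rss_hash;
	netif_receive_skb(skb);
}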
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index 4435d1084755..52797714ade7 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -100,6 +100,7 @@ enum
100 ICMP6_MIB_INMSGS, /* InMsgs */ 100 ICMP6_MIB_INMSGS, /* InMsgs */
101 ICMP6_MIB_INERRORS, /* InErrors */ 101 ICMP6_MIB_INERRORS, /* InErrors */
102 ICMP6_MIB_OUTMSGS, /* OutMsgs */ 102 ICMP6_MIB_OUTMSGS, /* OutMsgs */
103 ICMP6_MIB_OUTERRORS, /* OutErrors */
103 __ICMP6_MIB_MAX 104 __ICMP6_MIB_MAX
104}; 105};
105 106
@@ -227,6 +228,7 @@ enum
227 LINUX_MIB_SACKSHIFTFALLBACK, 228 LINUX_MIB_SACKSHIFTFALLBACK,
228 LINUX_MIB_TCPBACKLOGDROP, 229 LINUX_MIB_TCPBACKLOGDROP,
229 LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */ 230 LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */
231 LINUX_MIB_TCPDEFERACCEPTDROP,
230 __LINUX_MIB_MAX 232 __LINUX_MIB_MAX
231}; 233};
232 234
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 354cc5617f8b..032a19eb61b1 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -189,7 +189,8 @@ struct ucred {
189#define AF_ISDN 34 /* mISDN sockets */ 189#define AF_ISDN 34 /* mISDN sockets */
190#define AF_PHONET 35 /* Phonet sockets */ 190#define AF_PHONET 35 /* Phonet sockets */
191#define AF_IEEE802154 36 /* IEEE802154 sockets */ 191#define AF_IEEE802154 36 /* IEEE802154 sockets */
192#define AF_MAX 37 /* For now.. */ 192#define AF_CAIF 37 /* CAIF sockets */
193#define AF_MAX 38 /* For now.. */
193 194
194/* Protocol families, same as address families. */ 195/* Protocol families, same as address families. */
195#define PF_UNSPEC AF_UNSPEC 196#define PF_UNSPEC AF_UNSPEC
@@ -229,6 +230,7 @@ struct ucred {
229#define PF_ISDN AF_ISDN 230#define PF_ISDN AF_ISDN
230#define PF_PHONET AF_PHONET 231#define PF_PHONET AF_PHONET
231#define PF_IEEE802154 AF_IEEE802154 232#define PF_IEEE802154 AF_IEEE802154
233#define PF_CAIF AF_CAIF
232#define PF_MAX AF_MAX 234#define PF_MAX AF_MAX
233 235
234/* Maximum queue length specifiable by listen. */ 236/* Maximum queue length specifiable by listen. */
@@ -301,6 +303,7 @@ struct ucred {
301#define SOL_PNPIPE 275 303#define SOL_PNPIPE 275
302#define SOL_RDS 276 304#define SOL_RDS 276
303#define SOL_IUCV 277 305#define SOL_IUCV 277
306#define SOL_CAIF 278
304 307
305/* IPX options */ 308/* IPX options */
306#define IPX_TYPE 1 309#define IPX_TYPE 1
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 32bfd1a8a48d..632ff7c03280 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -33,6 +33,7 @@ struct plat_stmmacenet_data {
33 int bus_id; 33 int bus_id;
34 int pbl; 34 int pbl;
35 int has_gmac; 35 int has_gmac;
36 int enh_desc;
36 void (*fix_mac_speed)(void *priv, unsigned int speed); 37 void (*fix_mac_speed)(void *priv, unsigned int speed);
37 void (*bus_setup)(unsigned long ioaddr); 38 void (*bus_setup)(unsigned long ioaddr);
38#ifdef CONFIG_STM_DRIVERS 39#ifdef CONFIG_STM_DRIVERS
diff --git a/include/linux/tipc.h b/include/linux/tipc.h
index 3d92396639de..9536d8aeadf1 100644
--- a/include/linux/tipc.h
+++ b/include/linux/tipc.h
@@ -127,23 +127,17 @@ static inline unsigned int tipc_node(__u32 addr)
127 * TIPC topology subscription service definitions 127 * TIPC topology subscription service definitions
128 */ 128 */
129 129
130#define TIPC_SUB_PORTS 0x01 /* filter for port availability */ 130#define TIPC_SUB_SERVICE 0x00 /* Filter for service availability */
131#define TIPC_SUB_SERVICE 0x02 /* filter for service availability */ 131#define TIPC_SUB_PORTS 0x01 /* Filter for port availability */
132#define TIPC_SUB_CANCEL 0x04 /* cancel a subscription */ 132#define TIPC_SUB_CANCEL 0x04 /* Cancel a subscription */
133#if 0
134/* The following filter options are not currently implemented */
135#define TIPC_SUB_NO_BIND_EVTS 0x04 /* filter out "publish" events */
136#define TIPC_SUB_NO_UNBIND_EVTS 0x08 /* filter out "withdraw" events */
137#define TIPC_SUB_SINGLE_EVT 0x10 /* expire after first event */
138#endif
139 133
140#define TIPC_WAIT_FOREVER ~0 /* timeout for permanent subscription */ 134#define TIPC_WAIT_FOREVER ~0 /* timeout for permanent subscription */
141 135
142struct tipc_subscr { 136struct tipc_subscr {
143 struct tipc_name_seq seq; /* name sequence of interest */ 137 struct tipc_name_seq seq; /* NBO. Name sequence of interest */
144 __u32 timeout; /* subscription duration (in ms) */ 138 __u32 timeout; /* NBO. Subscription duration (in ms) */
145 __u32 filter; /* bitmask of filter options */ 139 __u32 filter; /* NBO. Bitmask of filter options */
146 char usr_handle[8]; /* available for subscriber use */ 140 char usr_handle[8]; /* Opaque. Available for subscriber use */
147}; 141};
148 142
149#define TIPC_PUBLISHED 1 /* publication event */ 143#define TIPC_PUBLISHED 1 /* publication event */
@@ -151,11 +145,11 @@ struct tipc_subscr {
151#define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */ 145#define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */
152 146
153struct tipc_event { 147struct tipc_event {
154 __u32 event; /* event type */ 148 __u32 event; /* NBO. Event type, as defined above */
155 __u32 found_lower; /* matching name seq instances */ 149 __u32 found_lower; /* NBO. Matching name seq instances */
156 __u32 found_upper; /* " " " " */ 150 __u32 found_upper; /* " " " " " */
157 struct tipc_portid port; /* associated port */ 151 struct tipc_portid port; /* NBO. Associated port */
158 struct tipc_subscr s; /* associated subscription */ 152 struct tipc_subscr s; /* Original, associated subscription */
159}; 153};
160 154
161/* 155/*
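
Since the subscription fields are now documented as network byte order (NBO), a subscriber must convert them with htonl(); a sketch with a caller-supplied service type.

static void example_fill_subscr(struct tipc_subscr *s, __u32 service_type)
{
	s->seq.type = htonl(service_type);
	s->seq.lower = htonl(0);
	s->seq.upper = htonl(~0);
	s->timeout = htonl(TIPC_WAIT_FOREVER);
	s->filter = htonl(TIPC_SUB_SERVICE);
	memcpy(s->usr_handle, "example", 8);	/* opaque, subscriber use */
}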
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 4409967db0c4..bb44fa9ae135 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -23,7 +23,7 @@
23 */ 23 */
24#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */ 24#define NR_UNIX98_PTY_DEFAULT 4096 /* Default maximum for Unix98 ptys */
25#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */ 25#define NR_UNIX98_PTY_MAX (1 << MINORBITS) /* Absolute limit */
26#define NR_LDISCS 20 26#define NR_LDISCS 21
27 27
28/* line disciplines */ 28/* line disciplines */
29#define N_TTY 0 29#define N_TTY 0
@@ -46,8 +46,8 @@
46#define N_GIGASET_M101 16 /* Siemens Gigaset M101 serial DECT adapter */ 46#define N_GIGASET_M101 16 /* Siemens Gigaset M101 serial DECT adapter */
47#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */ 47#define N_SLCAN 17 /* Serial / USB serial CAN Adaptors */
48#define N_PPS 18 /* Pulse per Second */ 48#define N_PPS 18 /* Pulse per Second */
49
50#define N_V253 19 /* Codec control over voice modem */ 49#define N_V253 19 /* Codec control over voice modem */
50#define N_CAIF 20 /* CAIF protocol for talking to modems */
51 51
52/* 52/*
53 * This character is the same as _POSIX_VDISABLE: it cannot be used as 53 * This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/wireless.h b/include/linux/wireless.h
index 5b4c6c772a9b..e6827eedf18b 100644
--- a/include/linux/wireless.h
+++ b/include/linux/wireless.h
@@ -346,6 +346,8 @@
346#define SIOCIWFIRST 0x8B00 346#define SIOCIWFIRST 0x8B00
347#define SIOCIWLAST SIOCIWLASTPRIV /* 0x8BFF */ 347#define SIOCIWLAST SIOCIWLASTPRIV /* 0x8BFF */
348#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST) 348#define IW_IOCTL_IDX(cmd) ((cmd) - SIOCIWFIRST)
349#define IW_HANDLER(id, func) \
350 [IW_IOCTL_IDX(id)] = func
349 351
350/* Odd : get (world access), even : set (root access) */ 352/* Odd : get (world access), even : set (root access) */
351#define IW_IS_SET(cmd) (!((cmd) & 0x1)) 353#define IW_IS_SET(cmd) (!((cmd) & 0x1))
@@ -648,7 +650,7 @@
648 * 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */ 650 * 32 bit bitmasks. Note : 32 bits = 0x20 = 2^5. */
649#define IW_EVENT_CAPA_BASE(cmd) ((cmd >= SIOCIWFIRSTPRIV) ? \ 651#define IW_EVENT_CAPA_BASE(cmd) ((cmd >= SIOCIWFIRSTPRIV) ? \
650 (cmd - SIOCIWFIRSTPRIV + 0x60) : \ 652 (cmd - SIOCIWFIRSTPRIV + 0x60) : \
651 (cmd - SIOCSIWCOMMIT)) 653 (cmd - SIOCIWFIRST))
652#define IW_EVENT_CAPA_INDEX(cmd) (IW_EVENT_CAPA_BASE(cmd) >> 5) 654#define IW_EVENT_CAPA_INDEX(cmd) (IW_EVENT_CAPA_BASE(cmd) >> 5)
653#define IW_EVENT_CAPA_MASK(cmd) (1 << (IW_EVENT_CAPA_BASE(cmd) & 0x1F)) 655#define IW_EVENT_CAPA_MASK(cmd) (1 << (IW_EVENT_CAPA_BASE(cmd) & 0x1F))
654/* Event capability constants - event autogenerated by the kernel 656/* Event capability constants - event autogenerated by the kernel
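
A sketch of the new IW_HANDLER macro populating a wireless-extensions handler table; the iw_handler type comes from net/iw_handler.h and the handler names are illustrative.

static const iw_handler example_handlers[] = {
	IW_HANDLER(SIOCGIWNAME, example_get_name),
	IW_HANDLER(SIOCSIWFREQ, example_set_freq),
	IW_HANDLER(SIOCGIWFREQ, example_get_freq),
};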
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
new file mode 100644
index 000000000000..42a7c7867849
--- /dev/null
+++ b/include/net/caif/caif_dev.h
@@ -0,0 +1,90 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef CAIF_DEV_H_
8#define CAIF_DEV_H_
9
10#include <net/caif/caif_layer.h>
11#include <net/caif/cfcnfg.h>
12#include <linux/caif/caif_socket.h>
13#include <linux/if.h>
14
15/**
16 * struct caif_param - CAIF parameters.
17 * @size: Length of data
18 * @data: Binary Data Blob
19 */
20struct caif_param {
21 u16 size;
22 u8 data[256];
23};
24
25/**
26 * caif_connect_request - Request data for CAIF channel setup.
27 * @sockaddr: Socket address to connect.
28 * @priority: Priority of the connection.
29 * @link_selector: Link selector (high bandwidth or low latency)
30 * @link_name: Name of the CAIF Link Layer to use.
31 *
32 * This struct is used when connecting a CAIF channel.
33 * It contains all CAIF channel configuration options.
34 */
35struct caif_connect_request {
36 int protocol;
37 struct sockaddr_caif sockaddr;
38 enum caif_channel_priority priority;
39 enum caif_link_selector link_selector;
40 char link_name[16];
41 struct caif_param param;
42};
43
44/**
45 * caif_connect_client - Connect a client to CAIF Core Stack.
46 * @config: Channel setup parameters, specifying what address
47 * to connect on the Modem.
48 * @client_layer: User implementation of client layer. This layer
49 * MUST have receive and control callback functions
50 * implemented.
51 *
52 * This function connects a CAIF channel. The Client must implement
53 * the struct cflayer. This layer represents the Client layer and holds
54 * receive functions and control callback functions. Control callback
55 * function will receive information about connect/disconnect responses,
57 * flow control etc. (see enum caif_control).
57 * E.g. CAIF Socket will call this function for each socket it connects
58 * and have one client_layer instance for each socket.
59 */
60int caif_connect_client(struct caif_connect_request *config,
61 struct cflayer *client_layer);
62
63/**
64 * caif_disconnect_client - Disconnects a client from the CAIF stack.
65 *
66 * @client_layer: Client layer to be removed.
67 */
68int caif_disconnect_client(struct cflayer *client_layer);
69
70/**
71 * connect_req_to_link_param - Translate configuration parameters
72 * from socket format to internal format.
73 * @cnfg: Pointer to configuration handler
74 * @con_req: Configuration parameters supplied in function
75 * caif_connect_client
76 * @channel_setup_param: Parameters supplied to the CAIF Core stack for
77 * setting up channels.
78 *
79 */
80int connect_req_to_link_param(struct cfcnfg *cnfg,
81 struct caif_connect_request *con_req,
82 struct cfctrl_link_param *channel_setup_param);
83
84/**
85 * get_caif_conf() - Get the configuration handler.
86 */
87struct cfcnfg *get_caif_conf(void);
88
89
90#endif /* CAIF_DEV_H_ */
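
A sketch of a client connect against the API above; the address setup is elided because struct sockaddr_caif is defined in linux/caif/caif_socket.h, not shown here, and the priority/selector enum values are assumptions.

static int example_connect(struct cflayer *client_layer)
{
	struct caif_connect_request req;

	memset(&req, 0, sizeof(req));
	req.priority = CAIF_PRIO_NORMAL;		/* assumed enum value */
	req.link_selector = CAIF_LINK_LOW_LATENCY;	/* assumed enum value */
	/* Fill req.sockaddr with the target service address here. */

	/* client_layer must implement receive() and ctrlcmd(). */
	return caif_connect_client(&req, client_layer);
}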
diff --git a/include/net/caif/caif_device.h b/include/net/caif/caif_device.h
new file mode 100644
index 000000000000..d02f044adb8a
--- /dev/null
+++ b/include/net/caif/caif_device.h
@@ -0,0 +1,55 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/ sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef CAIF_DEVICE_H_
8#define CAIF_DEVICE_H_
9#include <linux/kernel.h>
10#include <linux/net.h>
11#include <linux/netdevice.h>
12#include <linux/caif/caif_socket.h>
13#include <net/caif/caif_device.h>
14
15/**
16 * struct caif_dev_common - data shared between CAIF drivers and stack.
17 * @flowctrl: Flow Control callback function. This function is
18 * supplied by CAIF Core Stack and is used by CAIF
19 * Link Layer to send flow-stop to CAIF Core.
20 * The flow information will be distributed to all
21 * clients of CAIF.
22 *
23 * @link_select: Profile of device, either high-bandwidth or
24 * low-latency. This member is set by CAIF Link
25 * Layer Device in order to indicate if this device
26 * is a high bandwidth or low latency device.
27 *
28 * @use_frag: CAIF Frames may be fragmented.
29 * Is set by CAIF Link Layer in order to indicate if the
30 * interface receives fragmented frames that must be
31 * assembled by CAIF Core Layer.
32 *
33 * @use_fcs: Indicate if Frame CheckSum (fcs) is used.
34 * Is set if the physical interface is
35 * using Frame Checksum on the CAIF Frames.
36 *
37 * @use_stx: Indicate STart of frame eXtension (stx) in use.
38 * Is set if the CAIF Link Layer expects
39 * CAIF Frames to start with the STX byte.
40 *
41 * This structure is shared between the CAIF drivers and the CAIF stack.
42 * It is used by the device to register its behavior.
43 * CAIF Core layer must set the member flowctrl in order to supply
44 * CAIF Link Layer with the flow control function.
45 *
46 */
47 struct caif_dev_common {
48 void (*flowctrl)(struct net_device *net, int on);
49 enum caif_link_selector link_select;
50 int use_frag;
51 int use_fcs;
52 int use_stx;
53};
54
55#endif /* CAIF_DEVICE_H_ */
diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h
new file mode 100644
index 000000000000..25c472f0e5b8
--- /dev/null
+++ b/include/net/caif/caif_layer.h
@@ -0,0 +1,283 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland / sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef CAIF_LAYER_H_
8#define CAIF_LAYER_H_
9
10#include <linux/list.h>
11
12struct cflayer;
13struct cfpkt;
14struct cfpktq;
15struct caif_payload_info;
16struct caif_packet_funcs;
17
18#define CAIF_MAX_FRAMESIZE 4096
19#define CAIF_MAX_PAYLOAD_SIZE (4096 - 64)
20#define CAIF_NEEDED_HEADROOM (10)
21#define CAIF_NEEDED_TAILROOM (2)
22
23#define CAIF_LAYER_NAME_SZ 16
24#define CAIF_SUCCESS 1
25#define CAIF_FAILURE 0
26
27/**
28 * caif_assert() - Assert function for CAIF.
29 * @assert: expression to evaluate.
30 *
31 * This function will print an error message and do a WARN_ON if the
32 * assertion fails. Normally this will produce a stack dump at the current location.
33 */
34#define caif_assert(assert) \
35do { \
36 if (!(assert)) { \
37 pr_err("caif:Assert detected:'%s'\n", #assert); \
38 WARN_ON(!(assert)); \
39 } \
40} while (0)
41
42
43/**
44 * enum caif_ctrlcmd - CAIF Stack Control Signaling sent in layer.ctrlcmd().
45 *
46 * @CAIF_CTRLCMD_FLOW_OFF_IND: Flow Control is OFF, transmit function
47 * should stop sending data
48 *
49 * @CAIF_CTRLCMD_FLOW_ON_IND: Flow Control is ON, transmit function
50 * can start sending data
51 *
52 * @CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND: Remote end modem has decided to close
53 * down channel
54 *
55 * @CAIF_CTRLCMD_INIT_RSP: Called initially when the layer below
56 * has finished initialization
57 *
58 * @CAIF_CTRLCMD_DEINIT_RSP: Called when de-initialization is
59 * complete
60 *
61 * @CAIF_CTRLCMD_INIT_FAIL_RSP: Called if initialization fails
62 *
63 * @_CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND: CAIF Link layer temporarily cannot
64 * send more packets.
65 * @_CAIF_CTRLCMD_PHYIF_FLOW_ON_IND: Called if CAIF Link layer is able
66 * to send packets again.
67 * @_CAIF_CTRLCMD_PHYIF_DOWN_IND: Called if CAIF Link layer is going
68 * down.
69 *
70 * These commands are sent upwards in the CAIF stack to the CAIF Client.
71 * They are used for signaling originating from the modem or CAIF Link Layer.
72 * These are either responses (*_RSP) or events (*_IND).
73 */
74enum caif_ctrlcmd {
75 CAIF_CTRLCMD_FLOW_OFF_IND,
76 CAIF_CTRLCMD_FLOW_ON_IND,
77 CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND,
78 CAIF_CTRLCMD_INIT_RSP,
79 CAIF_CTRLCMD_DEINIT_RSP,
80 CAIF_CTRLCMD_INIT_FAIL_RSP,
81 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
82 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND,
83 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
84};
85
86/**
87 * enum caif_modemcmd - Modem Control Signaling, sent from CAIF Client
88 * to the CAIF Link Layer or modem.
89 *
90 * @CAIF_MODEMCMD_FLOW_ON_REQ: Flow Control is ON, transmit function
91 * can start sending data.
92 *
93 * @CAIF_MODEMCMD_FLOW_OFF_REQ: Flow Control is OFF, transmit function
94 * should stop sending data.
95 *
96 * @_CAIF_MODEMCMD_PHYIF_USEFULL: Notify physical layer that it is in use
97 *
98 * @_CAIF_MODEMCMD_PHYIF_USELESS: Notify physical layer that it is
99 * no longer in use.
100 *
101 * These are requests sent 'downwards' in the stack.
102 * Flow ON, OFF can be indicated to the modem.
103 */
104enum caif_modemcmd {
105 CAIF_MODEMCMD_FLOW_ON_REQ = 0,
106 CAIF_MODEMCMD_FLOW_OFF_REQ = 1,
107 _CAIF_MODEMCMD_PHYIF_USEFULL = 3,
108 _CAIF_MODEMCMD_PHYIF_USELESS = 4
109};
110
111/**
112 * enum caif_direction - CAIF Packet Direction.
113 * Indicate if a packet is to be sent out or to be received in.
114 * @CAIF_DIR_IN: Incoming packet received.
115 * @CAIF_DIR_OUT: Outgoing packet to be transmitted.
116 */
117enum caif_direction {
118 CAIF_DIR_IN = 0,
119 CAIF_DIR_OUT = 1
120};
121
122/**
123 * struct cflayer - CAIF Stack layer.
124 * Defines the framework for the CAIF Core Stack.
125 * @up: Pointer up to the layer above.
126 * @dn: Pointer down to the layer below.
127 * @node: List node used when layer participate in a list.
128 * @receive: Packet receive function.
129 * @transmit: Packet transmit function.
130 * @ctrlcmd: Used for control signalling upwards in the stack.
131 * @modemcmd: Used for control signaling downwards in the stack.
132 * @prio: Priority of this layer.
133 * @id: The identity of this layer
134 * @type: The type of this layer
135 * @name: Name of the layer.
136 *
137 * This structure defines the layered structure in CAIF.
138 *
139 * It defines CAIF layering structure, used by all CAIF Layers and the
140 * layers interfacing CAIF.
141 *
142 * In order to integrate with CAIF an adaptation layer on top of the CAIF stack
143 * and PHY layer below the CAIF stack
144 * must be implemented. These layers must follow the design principles below.
145 *
146 * Principles for layering of protocol layers:
147 * - All layers must use this structure. If embedding it, then place this
148 * structure first in the layer specific structure.
149 *
150 * - No layer should depend on any other layer's private data.
151 *
152 * - In order to send data upwards do
153 * layer->up->receive(layer->up, packet);
154 *
155 * - In order to send data downwards do
156 * layer->dn->transmit(layer->dn, info, packet);
157 */
158struct cflayer {
159 struct cflayer *up;
160 struct cflayer *dn;
161 struct list_head node;
162
163 /*
164 * receive() - Receive Function.
165 * Contract: Each layer must implement a receive function passing the
166 * CAIF packets upwards in the stack.
167 * Packet handling rules:
168 * - The CAIF packet (cfpkt) cannot be accessed after
169 * passing it to the next layer using up->receive().
170 * - If parsing of the packet fails, the packet must be
171 * destroyed and -1 returned from the function.
172 * - If parsing succeeds (and above layers return OK) then
173 * the function must return a value > 0.
174 *
175 * Returns: result < 0 indicates an error, 0 or a positive value
176 * indicates success.
177 *
178 * @layr: Pointer to the current layer the receive function is
179 * implemented for (this pointer).
180 * @cfpkt: Pointer to CaifPacket to be handled.
181 */
182 int (*receive)(struct cflayer *layr, struct cfpkt *cfpkt);
183
184 /*
185 * transmit() - Transmit Function.
186 * Contract: Each layer must implement a transmit function passing the
187 * CAIF packet downwards in the stack.
188 * Packet handling rules:
189 * - The CAIF packet (cfpkt) ownership is passed to the
190 * transmit function. This means that the packet
191 * cannot be accessed after passing it to the below
192 * layer using dn->transmit().
193 *
194 * - If transmit fails, however, the ownership is returned
195 * to the caller. The caller of "dn->transmit()" must
196 * destroy or resend packet.
197 *
198 * - Return value less than zero means error, zero or
199 * greater than zero means OK.
200 *
201 * result < 0 indicates an error, 0 or a positive value
202 * indicates success.
203 *
204 * @layr: Pointer to the current layer the transmit function
205 * is implemented for (this pointer).
206 * @cfpkt: Pointer to CaifPacket to be handled.
207 */
208 int (*transmit) (struct cflayer *layr, struct cfpkt *cfpkt);
209
210 /*
211 * ctrlcmd() - Control Function upwards in CAIF Stack.
212 * Used for signaling responses (CAIF_CTRLCMD_*_RSP)
213 * and asynchronous events from the modem (CAIF_CTRLCMD_*_IND)
214 *
215 * @layr: Pointer to the current layer the receive function
216 * is implemented for (this pointer).
217 * @ctrl: Control Command.
218 */
219 void (*ctrlcmd) (struct cflayer *layr, enum caif_ctrlcmd ctrl,
220 int phyid);
221
222 /*
223 * modemctrl() - Control Function used for controlling the modem.
224 * Used to signal down-wards in the CAIF stack.
225 * Returns 0 on success, < 0 upon failure.
226 *
227 * @layr: Pointer to the current layer the receive function
228 * is implemented for (this pointer).
229 * @ctrl: Control Command.
230 */
231 int (*modemcmd) (struct cflayer *layr, enum caif_modemcmd ctrl);
232
233 unsigned short prio;
234 unsigned int id;
235 unsigned int type;
236 char name[CAIF_LAYER_NAME_SZ];
237};
238
239/**
240 * layer_set_up() - Set the up pointer for a specified layer.
241 * @layr: Layer where up pointer shall be set.
242 * @above: Layer above.
243 */
244#define layer_set_up(layr, above) ((layr)->up = (struct cflayer *)(above))
245
246/**
247 * layer_set_dn() - Set the down pointer for a specified layer.
248 * @layr: Layer where down pointer shall be set.
249 * @below: Layer below.
250 */
251#define layer_set_dn(layr, below) ((layr)->dn = (struct cflayer *)(below))
252
253/**
254 * struct dev_info - Physical device information about the physical layer.
255 * @dev: Pointer to native physical device.
256 * @id: Physical ID of the physical connection used by the
257 * logical CAIF connection. Used by service layers to
258 * identify their physical id to the CAIF MUX (CFMUXL) so
259 * that the MUX can add the correct physical ID to the
260 * packet.
261 */
262struct dev_info {
263 void *dev;
264 unsigned int id;
265};
266
267/**
268 * struct caif_payload_info - Payload information embedded in packet (sk_buff).
269 *
270 * @dev_info: Information about the receiving device.
271 *
272 * @hdr_len: Header length, used to align the payload on a 32-bit boundary.
273 *
274 * @channel_id: Channel ID of the logical CAIF connection.
275 * Used by mux to insert channel id into the caif packet.
276 */
277struct caif_payload_info {
278 struct dev_info *dev_info;
279 unsigned short hdr_len;
280 unsigned short channel_id;
281};
282
283#endif /* CAIF_LAYER_H_ */
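
A minimal sketch of a layer that follows the contracts documented above; all names are illustrative.

struct example_layer {
	struct cflayer layer;	/* must be placed first when embedding */
	int rx_count;
};

static int example_receive(struct cflayer *layr, struct cfpkt *pkt)
{
	struct example_layer *e =
		container_of(layr, struct example_layer, layer);

	e->rx_count++;
	/* pkt must not be accessed after handing it upwards. */
	return layr->up->receive(layr->up, pkt);
}

static int example_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
	/* On failure, ownership of pkt returns to our caller. */
	return layr->dn->transmit(layr->dn, pkt);
}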
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
new file mode 100644
index 000000000000..366082c5d435
--- /dev/null
+++ b/include/net/caif/cfcnfg.h
@@ -0,0 +1,133 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef CFCNFG_H_
8#define CFCNFG_H_
9#include <linux/spinlock.h>
10#include <net/caif/caif_layer.h>
11#include <net/caif/cfctrl.h>
12
13struct cfcnfg;
14
15/**
16 * enum cfcnfg_phy_type - Types of physical layers defined in CAIF Stack
17 *
18 * @CFPHYTYPE_FRAG: Fragmented frames physical interface.
19 * @CFPHYTYPE_CAIF: Generic CAIF physical interface
20 */
21enum cfcnfg_phy_type {
22 CFPHYTYPE_FRAG = 1,
23 CFPHYTYPE_CAIF,
24 CFPHYTYPE_MAX
25};
26
27/**
28 * enum cfcnfg_phy_preference - Physical preference HW Abstraction
29 *
30 * @CFPHYPREF_UNSPECIFIED: Default physical interface
31 *
32 * @CFPHYPREF_LOW_LAT: Default physical interface for low-latency
33 * traffic
34 * @CFPHYPREF_HIGH_BW: Default physical interface for high-bandwidth
35 * traffic
36 * @CFPHYPREF_LOOP: TEST only Loopback interface simulating modem
37 * responses.
38 *
39 */
40enum cfcnfg_phy_preference {
41 CFPHYPREF_UNSPECIFIED,
42 CFPHYPREF_LOW_LAT,
43 CFPHYPREF_HIGH_BW,
44 CFPHYPREF_LOOP
45};
46
47/**
48 * cfcnfg_create() - Create the CAIF configuration object.
49 */
50struct cfcnfg *cfcnfg_create(void);
51
52/**
53 * cfcnfg_remove() - Remove the CFCNFG object
54 * @cfg: config object
55 */
56void cfcnfg_remove(struct cfcnfg *cfg);
57
58/**
59 * cfcnfg_add_phy_layer() - Adds a physical layer to the CAIF stack.
60 * @cnfg: Pointer to a CAIF configuration object, created by
61 * cfcnfg_create().
62 * @phy_type: Specifies the type of physical interface, e.g.
63 * CFPHYTYPE_FRAG.
64 * @dev: Pointer to link layer device
65 * @phy_layer: Specify the physical layer. The transmit function
66 * MUST be set in the structure.
67 * @phyid: The assigned physical ID for this layer, used in
68 * cfcnfg_add_adapt_layer to specify PHY for the link.
69 * @pref: The phy (link layer) preference.
70 * @fcs: Specify if checksum is used in CAIF Framing Layer.
71 * @stx: Specify if Start Of Frame eXtention is used.
72 */
73
74void
75cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
76 void *dev, struct cflayer *phy_layer, u16 *phyid,
77 enum cfcnfg_phy_preference pref,
78 bool fcs, bool stx);
79
80/**
81 * cfcnfg_del_phy_layer - Deletes a phy layer from the CAIF stack.
82 *
83 * @cnfg: Pointer to a CAIF configuration object, created by
84 * cfcnfg_create().
85 * @phy_layer: Physical layer to be removed.
86 */
87int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer);
88
89/**
90 * cfcnfg_del_adapt_layer - Deletes an adaptation layer from the CAIF stack.
91 *
92 * @cnfg: Pointer to a CAIF configuration object, created by
93 * cfcnfg_create().
94 * @adap_layer: Adaptation layer to be removed.
95 */
96int cfcnfg_del_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer);
97
98/**
99 * cfcnfg_add_adaptation_layer - Add an adaptation layer to the CAIF stack.
100 *
101 * The adaptation layer is where the interface to application or higher-level
102 * driver functionality is implemented.
103 *
104 * @cnfg: Pointer to a CAIF configuration object, created by
105 * cfcnfg_create().
106 * @param: Link setup parameters.
107 * @adap_layer: Specify the adaptation layer; the receive and
108 * flow-control functions MUST be set in the structure.
109 *
110 */
111int
112cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
113 struct cfctrl_link_param *param,
114 struct cflayer *adap_layer);
115
116/**
117 * cfcnfg_get_phyid() - Get physical ID, given type.
118 * Returns one of the physical interfaces matching the given preference.
119 * NULL if no match is found.
120 * @cnfg: Configuration object
121 * @phy_pref: Caif Link Layer preference
122 */
123struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
124 enum cfcnfg_phy_preference phy_pref);
125
126/**
127 * cfcnfg_get_named() - Get the Physical Identifier of CAIF Link Layer
128 * @cnfg: Configuration object
129 * @name: Name of the Physical Layer (Caif Link Layer)
130 */
131int cfcnfg_get_named(struct cfcnfg *cnfg, char *name);
132
133#endif /* CFCNFG_H_ */
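
A sketch of a link-layer driver registering its physical layer with the stack; the variable names are illustrative and the fcs/stx choices arbitrary.

static u16 example_register_phy(struct net_device *dev,
				struct cflayer *phy_layer)
{
	u16 phyid;

	/* phy_layer->transmit must be set before registration. */
	cfcnfg_add_phy_layer(get_caif_conf(), CFPHYTYPE_CAIF, dev, phy_layer,
			     &phyid, CFPHYPREF_LOW_LAT,
			     false /* no checksum */, true /* use STX */);
	return phyid;
}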
diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
new file mode 100644
index 000000000000..dee25b86caa0
--- /dev/null
+++ b/include/net/caif/cfctrl.h
@@ -0,0 +1,138 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef CFCTRL_H_
8#define CFCTRL_H_
9#include <net/caif/caif_layer.h>
10#include <net/caif/cfsrvl.h>
11
12/* CAIF Control packet commands */
13enum cfctrl_cmd {
14 CFCTRL_CMD_LINK_SETUP = 0,
15 CFCTRL_CMD_LINK_DESTROY = 1,
16 CFCTRL_CMD_LINK_ERR = 2,
17 CFCTRL_CMD_ENUM = 3,
18 CFCTRL_CMD_SLEEP = 4,
19 CFCTRL_CMD_WAKE = 5,
20 CFCTRL_CMD_LINK_RECONF = 6,
21 CFCTRL_CMD_START_REASON = 7,
22 CFCTRL_CMD_RADIO_SET = 8,
23 CFCTRL_CMD_MODEM_SET = 9,
24 CFCTRL_CMD_MASK = 0xf
25};
26
27/* Channel types */
28enum cfctrl_srv {
29 CFCTRL_SRV_DECM = 0,
30 CFCTRL_SRV_VEI = 1,
31 CFCTRL_SRV_VIDEO = 2,
32 CFCTRL_SRV_DBG = 3,
33 CFCTRL_SRV_DATAGRAM = 4,
34 CFCTRL_SRV_RFM = 5,
35 CFCTRL_SRV_UTIL = 6,
36 CFCTRL_SRV_MASK = 0xf
37};
38
39#define CFCTRL_RSP_BIT 0x20
40#define CFCTRL_ERR_BIT 0x10
41
42struct cfctrl_rsp {
43 void (*linksetup_rsp)(struct cflayer *layer, u8 linkid,
44 enum cfctrl_srv serv, u8 phyid,
45 struct cflayer *adapt_layer);
46 void (*linkdestroy_rsp)(struct cflayer *layer, u8 linkid,
47 struct cflayer *client_layer);
48 void (*linkerror_ind)(void);
49 void (*enum_rsp)(void);
50 void (*sleep_rsp)(void);
51 void (*wake_rsp)(void);
52 void (*restart_rsp)(void);
53 void (*radioset_rsp)(void);
54 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
55 struct cflayer *client_layer);
56};
57
58/* Link Setup Parameters for CAIF-Links. */
59struct cfctrl_link_param {
60 enum cfctrl_srv linktype;/* (T3,T0) Type of Channel */
61 u8 priority; /* (P4,P0) Priority of the channel */
62 u8 phyid; /* (U2-U0) Physical interface to connect */
63 u8 endpoint; /* (E1,E0) Endpoint for data channels */
64 u8 chtype; /* (H1,H0) Channel-Type, applies to
65 * VEI, DEBUG */
66 union {
67 struct {
68 u8 connid; /* (D7,D0) Video LinkId */
69 } video;
70
71 struct {
72 u32 connid; /* (N31,N0) Connection ID used
73 * for Datagram */
74 } datagram;
75
76 struct {
77 u32 connid; /* Connection ID used for RFM */
78 char volume[20]; /* Volume to mount for RFM */
79 } rfm; /* Configuration for RFM */
80
81 struct {
82 u16 fifosize_kb; /* Psock FIFO size in KB */
83 u16 fifosize_bufs; /* Psock # signal buffers */
84 char name[16]; /* Name of the PSOCK service */
85 u8 params[255]; /* Link setup parameters */
86 u16 paramlen; /* Length of Link Setup
87 * Parameters */
88 } utility; /* Configuration for Utility Links (Psock) */
89 } u;
90};
91
92/* This structure is used internally in CFCTRL */
93struct cfctrl_request_info {
94 int sequence_no;
95 enum cfctrl_cmd cmd;
96 u8 channel_id;
97 struct cfctrl_link_param param;
98 struct cfctrl_request_info *next;
99 struct cflayer *client_layer;
100};
101
102struct cfctrl {
103 struct cfsrvl serv;
104 struct cfctrl_rsp res;
105 atomic_t req_seq_no;
106 atomic_t rsp_seq_no;
107 struct cfctrl_request_info *first_req;
108 /* Protects from simultaneous access to first_req list */
109 spinlock_t info_list_lock;
110#ifndef CAIF_NO_LOOP
111 u8 loop_linkid;
112 int loop_linkused[256];
113 /* Protects simultaneous access to loop_linkid and loop_linkused */
114 spinlock_t loop_linkid_lock;
115#endif
116
117};
118
119void cfctrl_enum_req(struct cflayer *cfctrl, u8 physlinkid);
120void cfctrl_linkup_request(struct cflayer *cfctrl,
121 struct cfctrl_link_param *param,
122 struct cflayer *user_layer);
123int cfctrl_linkdown_req(struct cflayer *cfctrl, u8 linkid,
124 struct cflayer *client);
125void cfctrl_sleep_req(struct cflayer *cfctrl);
126void cfctrl_wake_req(struct cflayer *cfctrl);
127void cfctrl_getstartreason_req(struct cflayer *cfctrl);
128struct cflayer *cfctrl_create(void);
129void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn);
130void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up);
131struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer);
132bool cfctrl_req_eq(struct cfctrl_request_info *r1,
133 struct cfctrl_request_info *r2);
134void cfctrl_insert_req(struct cfctrl *ctrl,
135 struct cfctrl_request_info *req);
136struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
137 struct cfctrl_request_info *req);
138#endif /* CFCTRL_H_ */
diff --git a/include/net/caif/cffrml.h b/include/net/caif/cffrml.h
new file mode 100644
index 000000000000..3f14d2e1ce61
--- /dev/null
+++ b/include/net/caif/cffrml.h
@@ -0,0 +1,16 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef CFFRML_H_
8#define CFFRML_H_
9#include <net/caif/caif_layer.h>
10
11struct cffrml;
12struct cflayer *cffrml_create(u16 phyid, bool DoFCS);
13void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up);
14void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn);
15
16#endif /* CFFRML_H_ */
diff --git a/include/net/caif/cfmuxl.h b/include/net/caif/cfmuxl.h
new file mode 100644
index 000000000000..4e1b4f33423e
--- /dev/null
+++ b/include/net/caif/cfmuxl.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef CFMUXL_H_
8#define CFMUXL_H_
9#include <net/caif/caif_layer.h>
10
11struct cfsrvl;
12struct cffrml;
13
14struct cflayer *cfmuxl_create(void);
15int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid);
16struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid);
17int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *up, u8 phyid);
18struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 linkid);
19bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid);
20u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id);
21
22#endif /* CFMUXL_H_ */
diff --git a/include/net/caif/cfpkt.h b/include/net/caif/cfpkt.h
new file mode 100644
index 000000000000..fbc681beff52
--- /dev/null
+++ b/include/net/caif/cfpkt.h
@@ -0,0 +1,274 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef CFPKT_H_
8#define CFPKT_H_
9#include <net/caif/caif_layer.h>
10#include <linux/types.h>
11struct cfpkt;
12
13/* Create a CAIF packet.
14 * len: Length of packet to be created
15 * @return New packet.
16 */
17struct cfpkt *cfpkt_create(u16 len);
18
19/* Create a CAIF packet.
20 * data Data to copy.
21 * len Length of packet to be created
22 * @return New packet.
23 */
24struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len);
25/*
26 * Destroy a CAIF Packet.
27 * pkt Packet to be destroyed.
28 */
29void cfpkt_destroy(struct cfpkt *pkt);
30
31/*
32 * Extract header from packet.
33 *
34 * pkt Packet to extract header data from.
35 * data Pointer to copy the header data into.
36 * len Length of head data to copy.
37 * @return zero on success and error code upon failure
38 */
39int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len);
40
41/*
42 * Peek header from packet.
43 * Reads data from packet without changing packet.
44 *
45 * pkt Packet to extract header data from.
46 * data Pointer to copy the header data into.
47 * len Length of head data to copy.
48 * @return zero on success and error code upon failure
49 */
50int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len);
51
52/*
53 * Extract trailer from packet (i.e. from the end of the packet).
54 *
55 * pkt Packet to extract trailer data from.
56 * data Pointer to copy the trailer data into.
57 * len Length of trailer data to copy.
58 * @return zero on success and error code upon failure
59 */
60int cfpkt_extr_trail(struct cfpkt *pkt, void *data, u16 len);
61
62/*
63 * Add header to packet.
64 *
65 *
66 * pkt Packet to add header data to.
67 * data Pointer to data to copy into the header.
68 * len Length of header data to copy.
69 * @return zero on success and error code upon failure
70 */
71int cfpkt_add_head(struct cfpkt *pkt, const void *data, u16 len);
72
73/*
74 * Add trailer to packet.
75 *
76 *
77 * pkt Packet to add trailer data to.
78 * data Pointer to data to copy into the trailer.
79 * len Length of trailer data to copy.
80 * @return zero on success and error code upon failure
81 */
82int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len);
83
84/*
85 * Pad trailer on packet.
86 * Moves data pointer in packet, no content copied.
87 *
88 * pkt Packet in which to pad trailer.
89 * len Length of padding to add.
90 * @return zero on success and error code upon failure
91 */
92int cfpkt_pad_trail(struct cfpkt *pkt, u16 len);
93
94/*
95 * Add a single byte to packet body (tail).
96 *
97 * pkt Packet in which to add byte.
98 * data Byte to add.
99 * @return zero on success and error code upon failure
100 */
101int cfpkt_addbdy(struct cfpkt *pkt, const u8 data);
102
103/*
104 * Add data to the packet body (tail).
105 *
106 * pkt Packet in which to add data.
107 * data Pointer to data to copy into the packet body.
108 * len Length of data to add.
109 * @return zero on success and error code upon failure
110 */
111int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len);
112
113/*
114 * Checks whether there is more data to process in the packet.
115 * pkt Packet to check.
116 * @return true if more data is available in the packet, false otherwise
117 */
118bool cfpkt_more(struct cfpkt *pkt);
119
120/*
121 * Checks whether the packet is erroneous,
122 * i.e. if an attempt has been made to extract more data than is available
123 * in the packet, or to write more data than was allocated in cfpkt_create().
124 * pkt Packet to check.
125 * @return true on error false otherwise
126 */
127bool cfpkt_erroneous(struct cfpkt *pkt);
128
129/*
130 * Get the packet length.
131 * pkt Packet to get length from.
132 * @return Number of bytes in packet.
133 */
134u16 cfpkt_getlen(struct cfpkt *pkt);
135
136/*
137 * Set the packet length, by adjusting the trailer pointer according to length.
138 * pkt Packet to set length.
139 * len Packet length.
140 * @return Number of bytes in packet.
141 */
142int cfpkt_setlen(struct cfpkt *pkt, u16 len);
143
144/*
145 * cfpkt_append - Appends a packet's data to another packet.
146 * dstpkt: Packet to append data into, WILL BE FREED BY THIS FUNCTION
147 * addpkt: Packet to be appended and automatically released,
148 * WILL BE FREED BY THIS FUNCTION.
149 * expectlen: Packet's expected total length. This should be considered
150 * as a hint.
151 * NB: Input packets will be destroyed after appending and cannot be used
152 * after calling this function.
153 * @return The new appended packet.
154 */
155struct cfpkt *cfpkt_append(struct cfpkt *dstpkt, struct cfpkt *addpkt,
156 u16 expectlen);
157
158/*
159 * cfpkt_split - Split a packet into two packets at the specified split point.
160 * pkt: Packet to be split (will contain the first part of the data on exit)
161 * pos: Position to split packet in two parts.
162 * @return The new packet, containing the second part of the data.
163 */
164struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos);
165
166/*
167 * Iteration function, iterates the packet buffers from start to end.
168 *
169 * Checksum iteration function used to iterate buffers
170 * (we may have packets consisting of a chain of buffers)
171 * pkt: Packet to calculate checksum for
172 * iter_func: Function pointer to iteration function
173 * chks: Checksum calculated so far.
174 * buf: Pointer to the buffer to checksum
175 * len: Length of buf.
176 * data: Initial checksum value.
177 * @return Checksum of buffer.
178 */
179
180u16 cfpkt_iterate(struct cfpkt *pkt,
181 u16 (*iter_func)(u16 chks, void *buf, u16 len),
182 u16 data);
183
184/* Append by giving user access to packet buffer
185 * cfpkt Packet to append to
186 * buf Buffer inside pkt that user shall copy data into
187 * buflen Length of buffer and number of bytes added to packet
188 * @return 0 on error, 1 on success
189 */
190int cfpkt_raw_append(struct cfpkt *cfpkt, void **buf, unsigned int buflen);
191
192/* Extract by giving user access to packet buffer
193 * cfpkt Packet to extract from
194 * buf Buffer inside pkt that user shall copy data from
195 * buflen Length of buffer and number of bytes removed from packet
196 * @return 0 on error, 1 on success
197 */
198int cfpkt_raw_extract(struct cfpkt *cfpkt, void **buf, unsigned int buflen);
199
200/* Map from a "native" packet (e.g. Linux Socket Buffer) to a CAIF packet.
201 * dir - Direction indicating whether this packet is to be sent or received.
202 * nativepkt - The native packet to be transformed to a CAIF packet
203 * @return The mapped CAIF Packet CFPKT.
204 */
205struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt);
206
207/* Map from a CAIF packet to a "native" packet (e.g. Linux Socket Buffer).
208 * pkt - The CAIF packet to be transformed into a "native" packet.
209 * @return The native packet transformed from a CAIF packet.
210 */
211void *cfpkt_tonative(struct cfpkt *pkt);
212
213/*
214 * Insert a packet in the packet queue.
215 * pktq Packet queue to insert into
216 * pkt Packet to be inserted in queue
217 * prio Priority of packet
218 */
219void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt,
220 unsigned short prio);
221
222/*
223 * Remove a packet from the packet queue.
224 * pktq Packet queue to fetch packets from.
225 * @return Dequeued packet.
226 */
227struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq);
228
229/*
230 * Peek into a packet from the packet queue.
231 * pktq Packet queue to fetch packets from.
232 * @return Peeked packet.
233 */
234struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq);
235
236/*
237 * Creates a new packet queue.
238 * @return Pointer to new packet queue.
239 */
240struct cfpktq *cfpktq_create(void);
241
242/*
243 * Get the number of packets in the queue.
244 * pktq Packet queue to fetch count from.
245 * @return Number of packets in queue.
246 */
247int cfpkt_qcount(struct cfpktq *pktq);
248
249/*
250 * Put content of packet into buffer for debugging purposes.
251 * pkt Packet to copy data from
252 * buf Buffer to copy data into
253 * buflen Length of data to copy
254 * @return Pointer to copied data
255 */
256char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen);
257
258/*
259 * Clones a packet and releases the original packet.
260 * This is used for taking ownership of a packet, e.g. when queueing.
261 * pkt Packet to clone and release.
262 * @return Cloned packet.
263 */
264struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt);
265
266
267/*
268 * Returns packet information for a packet.
269 * pkt Packet to get info from.
270 * @return Packet information
271 */
272struct caif_payload_info *cfpkt_info(struct cfpkt *pkt);
273/*! @} */
274#endif /* CFPKT_H_ */
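
For illustration, a minimal sketch of the iteration API above computing a 16-bit additive checksum over a possibly chained packet; csum_add16() and example_pkt_checksum() are hypothetical names, not part of the interface:

    #include <net/caif/cfpkt.h>

    /* Hypothetical iter_func: fold one buffer of the chain into a 16-bit sum. */
    static u16 csum_add16(u16 chks, void *buf, u16 len)
    {
            u8 *p = buf;
            u16 i;

            for (i = 0; i < len; i++)
                    chks += p[i];
            return chks;
    }

    /* Checksum the whole packet; 0 seeds the "data" argument above. */
    static u16 example_pkt_checksum(struct cfpkt *pkt)
    {
            return cfpkt_iterate(pkt, csum_add16, 0);
    }
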
diff --git a/include/net/caif/cfserl.h b/include/net/caif/cfserl.h
new file mode 100644
index 000000000000..b8374321b362
--- /dev/null
+++ b/include/net/caif/cfserl.h
@@ -0,0 +1,12 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef CFSERL_H_
8#define CFSERL_H_
9#include <net/caif/caif_layer.h>
10
11struct cflayer *cfserl_create(int type, int instance, bool use_stx);
12#endif /* CFSERL_H_ */
diff --git a/include/net/caif/cfsrvl.h b/include/net/caif/cfsrvl.h
new file mode 100644
index 000000000000..b2a12db20cd2
--- /dev/null
+++ b/include/net/caif/cfsrvl.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#ifndef CFSRVL_H_
8#define CFSRVL_H_
9#include <linux/list.h>
10#include <linux/stddef.h>
11#include <linux/types.h>
12struct cfsrvl {
13 struct cflayer layer;
14 bool open;
15 bool phy_flow_on;
16 bool modem_flow_on;
17 struct dev_info dev_info;
18};
19
20struct cflayer *cfvei_create(u8 linkid, struct dev_info *dev_info);
21struct cflayer *cfdgml_create(u8 linkid, struct dev_info *dev_info);
22struct cflayer *cfutill_create(u8 linkid, struct dev_info *dev_info);
23struct cflayer *cfvidl_create(u8 linkid, struct dev_info *dev_info);
24struct cflayer *cfrfml_create(u8 linkid, struct dev_info *dev_info);
25struct cflayer *cfdbgl_create(u8 linkid, struct dev_info *dev_info);
26bool cfsrvl_phyid_match(struct cflayer *layer, int phyid);
27void cfservl_destroy(struct cflayer *layer);
28void cfsrvl_init(struct cfsrvl *service,
29 u8 channel_id,
30 struct dev_info *dev_info);
31bool cfsrvl_ready(struct cfsrvl *service, int *err);
32u8 cfsrvl_getphyid(struct cflayer *layer);
33
34#endif /* CFSRVL_H_ */
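
A hedged sketch of the intended embedding pattern, assuming only the declarations above (struct cfmysrvl and example_srvl_setup() are hypothetical): a service layer places struct cfsrvl first and lets cfsrvl_init() fill in the common state:

    #include <net/caif/cfsrvl.h>

    /* Hypothetical service layer embedding the generic service state. */
    struct cfmysrvl {
            struct cfsrvl serv;     /* must be first for layer casts */
            int my_state;
    };

    static void example_srvl_setup(struct cfmysrvl *my, u8 channel_id,
                                   struct dev_info *dev_info)
    {
            cfsrvl_init(&my->serv, channel_id, dev_info);
            my->my_state = 0;
    }
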
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 3d134a1fb96b..37cebd3aa0f7 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -704,6 +704,10 @@ struct cfg80211_crypto_settings {
704 * @key_len: length of WEP key for shared key authentication 704 * @key_len: length of WEP key for shared key authentication
705 * @key_idx: index of WEP key for shared key authentication 705 * @key_idx: index of WEP key for shared key authentication
706 * @key: WEP key for shared key authentication 706 * @key: WEP key for shared key authentication
707 * @local_state_change: This is a request for a local state change only, i.e., no
708 * Authentication frame is to be transmitted and authentication state is
709 * to be changed without having to wait for a response from the peer STA
710 * (AP).
707 */ 711 */
708struct cfg80211_auth_request { 712struct cfg80211_auth_request {
709 struct cfg80211_bss *bss; 713 struct cfg80211_bss *bss;
@@ -712,6 +716,7 @@ struct cfg80211_auth_request {
712 enum nl80211_auth_type auth_type; 716 enum nl80211_auth_type auth_type;
713 const u8 *key; 717 const u8 *key;
714 u8 key_len, key_idx; 718 u8 key_len, key_idx;
719 bool local_state_change;
715}; 720};
716 721
717/** 722/**
@@ -744,12 +749,15 @@ struct cfg80211_assoc_request {
744 * @ie: Extra IEs to add to Deauthentication frame or %NULL 749 * @ie: Extra IEs to add to Deauthentication frame or %NULL
745 * @ie_len: Length of ie buffer in octets 750 * @ie_len: Length of ie buffer in octets
746 * @reason_code: The reason code for the deauthentication 751 * @reason_code: The reason code for the deauthentication
752 * @local_state_change: This is a request for a local state change only, i.e., no
753 * Deauthentication frame is to be transmitted.
747 */ 754 */
748struct cfg80211_deauth_request { 755struct cfg80211_deauth_request {
749 struct cfg80211_bss *bss; 756 struct cfg80211_bss *bss;
750 const u8 *ie; 757 const u8 *ie;
751 size_t ie_len; 758 size_t ie_len;
752 u16 reason_code; 759 u16 reason_code;
760 bool local_state_change;
753}; 761};
754 762
755/** 763/**
@@ -762,12 +770,15 @@ struct cfg80211_deauth_request {
762 * @ie: Extra IEs to add to Disassociation frame or %NULL 770 * @ie: Extra IEs to add to Disassociation frame or %NULL
763 * @ie_len: Length of ie buffer in octets 771 * @ie_len: Length of ie buffer in octets
764 * @reason_code: The reason code for the disassociation 772 * @reason_code: The reason code for the disassociation
773 * @local_state_change: This is a request for a local state change only, i.e., no
774 * Disassociation frame is to be transmitted.
765 */ 775 */
766struct cfg80211_disassoc_request { 776struct cfg80211_disassoc_request {
767 struct cfg80211_bss *bss; 777 struct cfg80211_bss *bss;
768 const u8 *ie; 778 const u8 *ie;
769 size_t ie_len; 779 size_t ie_len;
770 u16 reason_code; 780 u16 reason_code;
781 bool local_state_change;
771}; 782};
772 783
773/** 784/**
@@ -1007,6 +1018,7 @@ struct cfg80211_pmksa {
1007 * RSN IE. It allows for faster roaming between WPA2 BSSIDs. 1018 * RSN IE. It allows for faster roaming between WPA2 BSSIDs.
1008 * @del_pmksa: Delete a cached PMKID. 1019 * @del_pmksa: Delete a cached PMKID.
1009 * @flush_pmksa: Flush all cached PMKIDs. 1020 * @flush_pmksa: Flush all cached PMKIDs.
1021 * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold.
1010 * 1022 *
1011 */ 1023 */
1012struct cfg80211_ops { 1024struct cfg80211_ops {
@@ -1152,6 +1164,10 @@ struct cfg80211_ops {
1152 1164
1153 int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev, 1165 int (*set_power_mgmt)(struct wiphy *wiphy, struct net_device *dev,
1154 bool enabled, int timeout); 1166 bool enabled, int timeout);
1167
1168 int (*set_cqm_rssi_config)(struct wiphy *wiphy,
1169 struct net_device *dev,
1170 s32 rssi_thold, u32 rssi_hyst);
1155}; 1171};
1156 1172
1157/* 1173/*
@@ -2337,4 +2353,18 @@ bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf,
2337void cfg80211_action_tx_status(struct net_device *dev, u64 cookie, 2353void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
2338 const u8 *buf, size_t len, bool ack, gfp_t gfp); 2354 const u8 *buf, size_t len, bool ack, gfp_t gfp);
2339 2355
2356
2357/**
2358 * cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
2359 * @dev: network device
2360 * @rssi_event: the triggered RSSI event
2361 * @gfp: context flags
2362 *
2363 * This function is called when a configured connection quality monitoring
2364 * rssi threshold is reached.
2365 */
2366void cfg80211_cqm_rssi_notify(struct net_device *dev,
2367 enum nl80211_cqm_rssi_threshold_event rssi_event,
2368 gfp_t gfp);
2369
2340#endif /* __NET_CFG80211_H */ 2370#endif /* __NET_CFG80211_H */
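
A hedged sketch of both ends of the new CQM interface, using only the prototypes above; the mydrv_* names and the firmware helper are hypothetical, and the NL80211_CQM_RSSI_THRESHOLD_EVENT_* values are assumed from the matching nl80211 enum:

    #include <net/cfg80211.h>

    /* Hypothetical firmware hook. */
    int mydrv_fw_set_rssi_trigger(struct net_device *dev, s32 thold, u32 hyst);

    static int mydrv_set_cqm_rssi_config(struct wiphy *wiphy,
                                         struct net_device *dev,
                                         s32 rssi_thold, u32 rssi_hyst)
    {
            return mydrv_fw_set_rssi_trigger(dev, rssi_thold, rssi_hyst);
    }

    static struct cfg80211_ops mydrv_cfg_ops = {
            /* ... mandatory ops elided ... */
            .set_cqm_rssi_config = mydrv_set_cqm_rssi_config,
    };

    /* Event path: forward a firmware-reported crossing to cfg80211. */
    static void mydrv_report_rssi(struct net_device *dev, bool low)
    {
            cfg80211_cqm_rssi_notify(dev,
                                     low ? NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW :
                                           NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
                                     GFP_KERNEL);
    }
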
diff --git a/include/net/dn_fib.h b/include/net/dn_fib.h
index 52da6c3dd50d..bbcde3238e58 100644
--- a/include/net/dn_fib.h
+++ b/include/net/dn_fib.h
@@ -50,10 +50,6 @@ struct dn_fib_info {
50 __le16 fib_prefsrc; 50 __le16 fib_prefsrc;
51 __u32 fib_priority; 51 __u32 fib_priority;
52 __u32 fib_metrics[RTAX_MAX]; 52 __u32 fib_metrics[RTAX_MAX];
53#define dn_fib_mtu fib_metrics[RTAX_MTU-1]
54#define dn_fib_window fib_metrics[RTAX_WINDOW-1]
55#define dn_fib_rtt fib_metrics[RTAX_RTT-1]
56#define dn_fib_advmss fib_metrics[RTAX_ADVMSS-1]
57 int fib_nhs; 53 int fib_nhs;
58 int fib_power; 54 int fib_power;
59 struct dn_fib_nh fib_nh[0]; 55 struct dn_fib_nh fib_nh[0];
diff --git a/include/net/dst.h b/include/net/dst.h
index ce078cda6b74..aac5a5fcfda9 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -225,21 +225,6 @@ static inline void dst_confirm(struct dst_entry *dst)
225 neigh_confirm(dst->neighbour); 225 neigh_confirm(dst->neighbour);
226} 226}
227 227
228static inline void dst_negative_advice(struct dst_entry **dst_p,
229 struct sock *sk)
230{
231 struct dst_entry * dst = *dst_p;
232 if (dst && dst->ops->negative_advice) {
233 *dst_p = dst->ops->negative_advice(dst);
234
235 if (dst != *dst_p) {
236 extern void sk_reset_txq(struct sock *sk);
237
238 sk_reset_txq(sk);
239 }
240 }
241}
242
243static inline void dst_link_failure(struct sk_buff *skb) 228static inline void dst_link_failure(struct sk_buff *skb)
244{ 229{
245 struct dst_entry *dst = skb_dst(skb); 230 struct dst_entry *dst = skb_dst(skb);
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index c49086d2bc7d..52bd9e6c9141 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -114,4 +114,5 @@ extern int fib_rules_lookup(struct fib_rules_ops *,
114extern int fib_default_rule_add(struct fib_rules_ops *, 114extern int fib_default_rule_add(struct fib_rules_ops *,
115 u32 pref, u32 table, 115 u32 pref, u32 table,
116 u32 flags); 116 u32 flags);
117extern u32 fib_default_rule_pref(struct fib_rules_ops *ops);
117#endif 118#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 809970b7dfee..bb08692a20b0 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -86,11 +86,26 @@ struct flowi {
86 86
87struct net; 87struct net;
88struct sock; 88struct sock;
89typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family, 89struct flow_cache_ops;
90 u8 dir, void **objp, atomic_t **obj_refp); 90
91struct flow_cache_object {
92 const struct flow_cache_ops *ops;
93};
94
95struct flow_cache_ops {
96 struct flow_cache_object *(*get)(struct flow_cache_object *);
97 int (*check)(struct flow_cache_object *);
98 void (*delete)(struct flow_cache_object *);
99};
100
101typedef struct flow_cache_object *(*flow_resolve_t)(
102 struct net *net, struct flowi *key, u16 family,
103 u8 dir, struct flow_cache_object *oldobj, void *ctx);
104
105extern struct flow_cache_object *flow_cache_lookup(
106 struct net *net, struct flowi *key, u16 family,
107 u8 dir, flow_resolve_t resolver, void *ctx);
91 108
92extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
93 u8 dir, flow_resolve_t resolver);
94extern void flow_cache_flush(void); 109extern void flow_cache_flush(void);
95extern atomic_t flow_cache_genid; 110extern atomic_t flow_cache_genid;
96 111
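
For illustration, a minimal sketch of a resolver-side object under the reworked flow cache API above; all my_* names are hypothetical (the real users are the xfrm policy and bundle objects later in this series):

    #include <linux/kernel.h>
    #include <linux/slab.h>
    #include <net/flow.h>

    struct my_flow_obj {
            struct flow_cache_object flo;   /* embedded cache handle */
            atomic_t refcnt;
            int dead;
    };

    static struct flow_cache_object *my_flow_get(struct flow_cache_object *flo)
    {
            struct my_flow_obj *obj = container_of(flo, struct my_flow_obj, flo);

            atomic_inc(&obj->refcnt);
            return flo;
    }

    static int my_flow_check(struct flow_cache_object *flo)
    {
            struct my_flow_obj *obj = container_of(flo, struct my_flow_obj, flo);

            return !obj->dead;      /* zero tells the cache to drop the entry */
    }

    static void my_flow_delete(struct flow_cache_object *flo)
    {
            struct my_flow_obj *obj = container_of(flo, struct my_flow_obj, flo);

            if (atomic_dec_and_test(&obj->refcnt))
                    kfree(obj);
    }

    static const struct flow_cache_ops my_flow_ops = {
            .get    = my_flow_get,
            .check  = my_flow_check,
            .delete = my_flow_delete,
    };
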
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 15b3dfe9fce8..6e991e0d0d6f 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -48,15 +48,4 @@ extern void icmp_out_count(struct net *net, unsigned char type);
48/* Move into dst.h ? */ 48/* Move into dst.h ? */
49extern int xrlim_allow(struct dst_entry *dst, int timeout); 49extern int xrlim_allow(struct dst_entry *dst, int timeout);
50 50
51struct raw_sock {
52 /* inet_sock has to be the first member */
53 struct inet_sock inet;
54 struct icmp_filter filter;
55};
56
57static inline struct raw_sock *raw_sk(const struct sock *sk)
58{
59 return (struct raw_sock *)sk;
60}
61
62#endif /* _ICMP_H */ 51#endif /* _ICMP_H */
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 545d8b059bef..13f9fc086d54 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -54,16 +54,17 @@ struct inet6_ifaddr {
54 struct inet6_dev *idev; 54 struct inet6_dev *idev;
55 struct rt6_info *rt; 55 struct rt6_info *rt;
56 56
57 struct inet6_ifaddr *lst_next; /* next addr in addr_lst */ 57 struct hlist_node addr_lst;
58 struct inet6_ifaddr *if_next; /* next addr in inet6_dev */ 58 struct list_head if_list;
59 59
60#ifdef CONFIG_IPV6_PRIVACY 60#ifdef CONFIG_IPV6_PRIVACY
61 struct inet6_ifaddr *tmp_next; /* next addr in tempaddr_lst */ 61 struct list_head tmp_list;
62 struct inet6_ifaddr *ifpub; 62 struct inet6_ifaddr *ifpub;
63 int regen_count; 63 int regen_count;
64#endif 64#endif
65 65
66 int dead; 66 int dead;
67 struct rcu_head rcu;
67}; 68};
68 69
69struct ip6_sf_socklist { 70struct ip6_sf_socklist {
@@ -151,9 +152,9 @@ struct ipv6_devstat {
151}; 152};
152 153
153struct inet6_dev { 154struct inet6_dev {
154 struct net_device *dev; 155 struct net_device *dev;
155 156
156 struct inet6_ifaddr *addr_list; 157 struct list_head addr_list;
157 158
158 struct ifmcaddr6 *mc_list; 159 struct ifmcaddr6 *mc_list;
159 struct ifmcaddr6 *mc_tomb; 160 struct ifmcaddr6 *mc_tomb;
@@ -175,7 +176,7 @@ struct inet6_dev {
175#ifdef CONFIG_IPV6_PRIVACY 176#ifdef CONFIG_IPV6_PRIVACY
176 u8 rndid[8]; 177 u8 rndid[8];
177 struct timer_list regen_timer; 178 struct timer_list regen_timer;
178 struct inet6_ifaddr *tempaddr_list; 179 struct list_head tempaddr_list;
179#endif 180#endif
180 181
181 struct neigh_parms *nd_parms; 182 struct neigh_parms *nd_parms;
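
With addr_list converted to a list_head above, address walks become ordinary list iterations; a hedged sketch (the function is hypothetical, and real code must hold idev->lock or use RCU):

    #include <net/if_inet6.h>

    static int example_count_addrs(struct inet6_dev *idev)
    {
            struct inet6_ifaddr *ifa;
            int n = 0;

            list_for_each_entry(ifa, &idev->addr_list, if_list)
                    n++;
            return n;
    }
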
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index f13ddc2543b1..aae08f686633 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -38,5 +38,5 @@ extern void inet6_csk_reqsk_queue_hash_add(struct sock *sk,
38 38
39extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr); 39extern void inet6_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr);
40 40
41extern int inet6_csk_xmit(struct sk_buff *skb, int ipfragok); 41extern int inet6_csk_xmit(struct sk_buff *skb);
42#endif /* _INET6_CONNECTION_SOCK_H */ 42#endif /* _INET6_CONNECTION_SOCK_H */
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 696d6e4ce68a..b6d3b55da19b 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -36,9 +36,8 @@ struct tcp_congestion_ops;
36 * (i.e. things that depend on the address family) 36 * (i.e. things that depend on the address family)
37 */ 37 */
38struct inet_connection_sock_af_ops { 38struct inet_connection_sock_af_ops {
39 int (*queue_xmit)(struct sk_buff *skb, int ipfragok); 39 int (*queue_xmit)(struct sk_buff *skb);
40 void (*send_check)(struct sock *sk, int len, 40 void (*send_check)(struct sock *sk, struct sk_buff *skb);
41 struct sk_buff *skb);
42 int (*rebuild_header)(struct sock *sk); 41 int (*rebuild_header)(struct sock *sk);
43 int (*conn_request)(struct sock *sk, struct sk_buff *skb); 42 int (*conn_request)(struct sock *sk, struct sk_buff *skb);
44 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb, 43 struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 83fd34437cf1..b487bc1b99ab 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -21,6 +21,7 @@
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/types.h> 22#include <linux/types.h>
23#include <linux/jhash.h> 23#include <linux/jhash.h>
24#include <linux/netdevice.h>
24 25
25#include <net/flow.h> 26#include <net/flow.h>
26#include <net/sock.h> 27#include <net/sock.h>
@@ -101,6 +102,7 @@ struct rtable;
101 * @uc_ttl - Unicast TTL 102 * @uc_ttl - Unicast TTL
102 * @inet_sport - Source port 103 * @inet_sport - Source port
103 * @inet_id - ID counter for DF pkts 104 * @inet_id - ID counter for DF pkts
105 * @rxhash - flow hash received from netif layer
104 * @tos - TOS 106 * @tos - TOS
105 * @mc_ttl - Multicasting TTL 107 * @mc_ttl - Multicasting TTL
106 * @is_icsk - is this an inet_connection_sock? 108 * @is_icsk - is this an inet_connection_sock?
@@ -124,6 +126,9 @@ struct inet_sock {
124 __u16 cmsg_flags; 126 __u16 cmsg_flags;
125 __be16 inet_sport; 127 __be16 inet_sport;
126 __u16 inet_id; 128 __u16 inet_id;
129#ifdef CONFIG_RPS
130 __u32 rxhash;
131#endif
127 132
128 struct ip_options *opt; 133 struct ip_options *opt;
129 __u8 tos; 134 __u8 tos;
@@ -219,4 +224,37 @@ static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
219 return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0; 224 return inet_sk(sk)->transparent ? FLOWI_FLAG_ANYSRC : 0;
220} 225}
221 226
227static inline void inet_rps_record_flow(const struct sock *sk)
228{
229#ifdef CONFIG_RPS
230 struct rps_sock_flow_table *sock_flow_table;
231
232 rcu_read_lock();
233 sock_flow_table = rcu_dereference(rps_sock_flow_table);
234 rps_record_sock_flow(sock_flow_table, inet_sk(sk)->rxhash);
235 rcu_read_unlock();
236#endif
237}
238
239static inline void inet_rps_reset_flow(const struct sock *sk)
240{
241#ifdef CONFIG_RPS
242 struct rps_sock_flow_table *sock_flow_table;
243
244 rcu_read_lock();
245 sock_flow_table = rcu_dereference(rps_sock_flow_table);
246 rps_reset_sock_flow(sock_flow_table, inet_sk(sk)->rxhash);
247 rcu_read_unlock();
248#endif
249}
250
251static inline void inet_rps_save_rxhash(const struct sock *sk, u32 rxhash)
252{
253#ifdef CONFIG_RPS
254 if (unlikely(inet_sk(sk)->rxhash != rxhash)) {
255 inet_rps_reset_flow(sk);
256 inet_sk(sk)->rxhash = rxhash;
257 }
258#endif
259}
222#endif /* _INET_SOCK_H */ 260#endif /* _INET_SOCK_H */
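
A hedged sketch of where the new RPS helpers above might sit in a protocol receive path; the function is hypothetical, skb->rxhash is assumed to carry the flow hash from the netif layer, and with CONFIG_RPS unset all three helpers compile to nothing:

    #include <linux/skbuff.h>
    #include <net/inet_sock.h>

    static void example_sock_rcv(struct sock *sk, struct sk_buff *skb)
    {
            /* Remember the hash computed for this flow... */
            inet_rps_save_rxhash(sk, skb->rxhash);
            /* ...and record it so later packets are steered to this CPU. */
            inet_rps_record_flow(sk);
    }
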
diff --git a/include/net/ip.h b/include/net/ip.h
index 503994a38ed1..a84ceb692687 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -101,7 +101,7 @@ extern int ip_do_nat(struct sk_buff *skb);
101extern void ip_send_check(struct iphdr *ip); 101extern void ip_send_check(struct iphdr *ip);
102extern int __ip_local_out(struct sk_buff *skb); 102extern int __ip_local_out(struct sk_buff *skb);
103extern int ip_local_out(struct sk_buff *skb); 103extern int ip_local_out(struct sk_buff *skb);
104extern int ip_queue_xmit(struct sk_buff *skb, int ipfragok); 104extern int ip_queue_xmit(struct sk_buff *skb);
105extern void ip_init(void); 105extern void ip_init(void);
106extern int ip_append_data(struct sock *sk, 106extern int ip_append_data(struct sock *sk,
107 int getfrag(void *from, char *to, int offset, int len, 107 int getfrag(void *from, char *to, int offset, int len,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 86f46c49e318..4b1dc1161c37 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -88,34 +88,37 @@ struct rt6_info {
88 struct dst_entry dst; 88 struct dst_entry dst;
89 } u; 89 } u;
90 90
91 struct inet6_dev *rt6i_idev;
92
93#define rt6i_dev u.dst.dev 91#define rt6i_dev u.dst.dev
94#define rt6i_nexthop u.dst.neighbour 92#define rt6i_nexthop u.dst.neighbour
95#define rt6i_expires u.dst.expires 93#define rt6i_expires u.dst.expires
96 94
95 /*
96 * Tail elements of dst_entry (__refcnt etc.)
97 * and these elements (rarely used in hot path) are in
98 * the same cache line.
99 */
100 struct fib6_table *rt6i_table;
97 struct fib6_node *rt6i_node; 101 struct fib6_node *rt6i_node;
98 102
99 struct in6_addr rt6i_gateway; 103 struct in6_addr rt6i_gateway;
100
101 u32 rt6i_flags;
102 u32 rt6i_metric;
103 atomic_t rt6i_ref;
104 104
105 /* more non-fragment space at head required */ 105 atomic_t rt6i_ref;
106 unsigned short rt6i_nfheader_len;
107
108 u8 rt6i_protocol;
109 106
110 struct fib6_table *rt6i_table; 107 /* These are in a separate cache line. */
108 struct rt6key rt6i_dst ____cacheline_aligned_in_smp;
109 u32 rt6i_flags;
110 struct rt6key rt6i_src;
111 u32 rt6i_metric;
111 112
112 struct rt6key rt6i_dst; 113 struct inet6_dev *rt6i_idev;
113 114
114#ifdef CONFIG_XFRM 115#ifdef CONFIG_XFRM
115 u32 rt6i_flow_cache_genid; 116 u32 rt6i_flow_cache_genid;
116#endif 117#endif
118 /* more non-fragment space at head required */
119 unsigned short rt6i_nfheader_len;
117 120
118 struct rt6key rt6i_src; 121 u8 rt6i_protocol;
119}; 122};
120 123
121static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst) 124static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 68f67836e146..278312c95f96 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -152,9 +152,9 @@ static inline void __ip6_dst_store(struct sock *sk, struct dst_entry *dst,
152static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst, 152static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
153 struct in6_addr *daddr, struct in6_addr *saddr) 153 struct in6_addr *daddr, struct in6_addr *saddr)
154{ 154{
155 write_lock(&sk->sk_dst_lock); 155 spin_lock(&sk->sk_dst_lock);
156 __ip6_dst_store(sk, dst, daddr, saddr); 156 __ip6_dst_store(sk, dst, daddr, saddr);
157 write_unlock(&sk->sk_dst_lock); 157 spin_unlock(&sk->sk_dst_lock);
158} 158}
159 159
160static inline int ipv6_unicast_destination(struct sk_buff *skb) 160static inline int ipv6_unicast_destination(struct sk_buff *skb)
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index e72fb10ce573..b1d8db90b214 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -422,7 +422,7 @@ static inline int __ipv6_addr_diff(const void *token1, const void *token2, int a
422 for (i = 0; i < addrlen; i++) { 422 for (i = 0; i < addrlen; i++) {
423 __be32 xb = a1[i] ^ a2[i]; 423 __be32 xb = a1[i] ^ a2[i];
424 if (xb) 424 if (xb)
425 return i * 32 + 32 - fls(ntohl(xb)); 425 return i * 32 + 31 - __fls(ntohl(xb));
426 } 426 }
427 427
428 /* 428 /*
@@ -482,8 +482,7 @@ extern int ip6_rcv_finish(struct sk_buff *skb);
482extern int ip6_xmit(struct sock *sk, 482extern int ip6_xmit(struct sock *sk,
483 struct sk_buff *skb, 483 struct sk_buff *skb,
484 struct flowi *fl, 484 struct flowi *fl,
485 struct ipv6_txoptions *opt, 485 struct ipv6_txoptions *opt);
486 int ipfragok);
487 486
488extern int ip6_nd_hdr(struct sock *sk, 487extern int ip6_nd_hdr(struct sock *sk,
489 struct sk_buff *skb, 488 struct sk_buff *skb,
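
The fls() to __fls() change above is equivalent for the non-zero words this loop sees, since 32 - fls(x) == 31 - __fls(x) whenever x != 0, while __fls() skips the redundant zero test; a worked instance:

    /* xb = a1[i] ^ a2[i], non-zero, in host order after ntohl():
     *   xb = 0x00800000  ->  __fls(xb) = 23  (index of highest set bit)
     *   matching bits in this word = 31 - 23 = 8
     *   __ipv6_addr_diff() returns i * 32 + 8
     * Old form: 32 - fls(0x00800000) = 32 - 24 = 8, the same result.
     */
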
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index b2b98f3fa265..3afdb21cc31d 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -323,7 +323,7 @@ typedef int (*iw_handler)(struct net_device *dev, struct iw_request_info *info,
323struct iw_handler_def { 323struct iw_handler_def {
324 324
325 /* Array of handlers for standard ioctls 325 /* Array of handlers for standard ioctls
326 * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWCOMMIT] 326 * We will call dev->wireless_handlers->standard[ioctl - SIOCIWFIRST]
327 */ 327 */
328 const iw_handler * standard; 328 const iw_handler * standard;
329 /* Number of handlers defined (more precisely, index of the 329 /* Number of handlers defined (more precisely, index of the
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 45d7d44d7cbe..344e5bf72062 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -144,6 +144,7 @@ struct ieee80211_low_level_stats {
144 * new beacon (beaconing modes) 144 * new beacon (beaconing modes)
145 * @BSS_CHANGED_BEACON_ENABLED: Beaconing should be 145 * @BSS_CHANGED_BEACON_ENABLED: Beaconing should be
146 * enabled/disabled (beaconing modes) 146 * enabled/disabled (beaconing modes)
147 * @BSS_CHANGED_CQM: Connection quality monitor config changed
147 */ 148 */
148enum ieee80211_bss_change { 149enum ieee80211_bss_change {
149 BSS_CHANGED_ASSOC = 1<<0, 150 BSS_CHANGED_ASSOC = 1<<0,
@@ -156,6 +157,7 @@ enum ieee80211_bss_change {
156 BSS_CHANGED_BSSID = 1<<7, 157 BSS_CHANGED_BSSID = 1<<7,
157 BSS_CHANGED_BEACON = 1<<8, 158 BSS_CHANGED_BEACON = 1<<8,
158 BSS_CHANGED_BEACON_ENABLED = 1<<9, 159 BSS_CHANGED_BEACON_ENABLED = 1<<9,
160 BSS_CHANGED_CQM = 1<<10,
159}; 161};
160 162
161/** 163/**
@@ -185,6 +187,9 @@ enum ieee80211_bss_change {
185 * @enable_beacon: whether beaconing should be enabled or not 187 * @enable_beacon: whether beaconing should be enabled or not
186 * @ht_operation_mode: HT operation mode (like in &struct ieee80211_ht_info). 188 * @ht_operation_mode: HT operation mode (like in &struct ieee80211_ht_info).
187 * This field is only valid when the channel type is one of the HT types. 189 * This field is only valid when the channel type is one of the HT types.
190 * @cqm_rssi_thold: Connection quality monitor RSSI threshold, a zero value
191 * implies disabled
192 * @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
188 */ 193 */
189struct ieee80211_bss_conf { 194struct ieee80211_bss_conf {
190 const u8 *bssid; 195 const u8 *bssid;
@@ -202,6 +207,8 @@ struct ieee80211_bss_conf {
202 u64 timestamp; 207 u64 timestamp;
203 u32 basic_rates; 208 u32 basic_rates;
204 u16 ht_operation_mode; 209 u16 ht_operation_mode;
210 s32 cqm_rssi_thold;
211 u32 cqm_rssi_hyst;
205}; 212};
206 213
207/** 214/**
@@ -543,7 +550,7 @@ enum mac80211_rx_flags {
543 * @signal: signal strength when receiving this frame, either in dBm, in dB or 550 * @signal: signal strength when receiving this frame, either in dBm, in dB or
544 * unspecified depending on the hardware capabilities flags 551 * unspecified depending on the hardware capabilities flags
545 * @IEEE80211_HW_SIGNAL_* 552 * @IEEE80211_HW_SIGNAL_*
546 * @noise: noise when receiving this frame, in dBm. 553 * @noise: noise when receiving this frame, in dBm (DEPRECATED).
547 * @antenna: antenna used 554 * @antenna: antenna used
548 * @rate_idx: index of data rate into band's supported rates or MCS index if 555 * @rate_idx: index of data rate into band's supported rates or MCS index if
549 * HT rates are use (RX_FLAG_HT) 556 * HT rates are use (RX_FLAG_HT)
@@ -554,7 +561,7 @@ struct ieee80211_rx_status {
554 enum ieee80211_band band; 561 enum ieee80211_band band;
555 int freq; 562 int freq;
556 int signal; 563 int signal;
557 int noise; 564 int noise __deprecated;
558 int antenna; 565 int antenna;
559 int rate_idx; 566 int rate_idx;
560 int flag; 567 int flag;
@@ -580,11 +587,15 @@ struct ieee80211_rx_status {
580 * may turn the device off as much as possible. Typically, this flag will 587 * may turn the device off as much as possible. Typically, this flag will
581 * be set when an interface is set UP but not associated or scanning, but 588 * be set when an interface is set UP but not associated or scanning, but
582 * it can also be unset in that case when monitor interfaces are active. 589 * it can also be unset in that case when monitor interfaces are active.
590 * @IEEE80211_CONF_QOS: Enable 802.11e QoS, also known as WMM (Wireless
591 * Multimedia). On some drivers (iwlwifi is one of them) we have
592 * to enable/disable QoS explicitly.
583 */ 593 */
584enum ieee80211_conf_flags { 594enum ieee80211_conf_flags {
585 IEEE80211_CONF_MONITOR = (1<<0), 595 IEEE80211_CONF_MONITOR = (1<<0),
586 IEEE80211_CONF_PS = (1<<1), 596 IEEE80211_CONF_PS = (1<<1),
587 IEEE80211_CONF_IDLE = (1<<2), 597 IEEE80211_CONF_IDLE = (1<<2),
598 IEEE80211_CONF_QOS = (1<<3),
588}; 599};
589 600
590 601
@@ -609,6 +620,7 @@ enum ieee80211_conf_changed {
609 IEEE80211_CONF_CHANGE_CHANNEL = BIT(6), 620 IEEE80211_CONF_CHANGE_CHANNEL = BIT(6),
610 IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7), 621 IEEE80211_CONF_CHANGE_RETRY_LIMITS = BIT(7),
611 IEEE80211_CONF_CHANGE_IDLE = BIT(8), 622 IEEE80211_CONF_CHANGE_IDLE = BIT(8),
623 IEEE80211_CONF_CHANGE_QOS = BIT(9),
612}; 624};
613 625
614/** 626/**
@@ -954,6 +966,17 @@ enum ieee80211_tkip_key_type {
954 * Hardware can provide ack status reports of Tx frames to 966 * Hardware can provide ack status reports of Tx frames to
955 * the stack. 967 * the stack.
956 * 968 *
969 * @IEEE80211_HW_CONNECTION_MONITOR:
970 * The hardware performs its own connection monitoring, including
971 * periodic keep-alives to the AP and probing the AP on beacon loss.
972 * When this flag is set, signaling beacon-loss will cause an immediate
973 * change to disassociated state.
974 *
975 * @IEEE80211_HW_SUPPORTS_CQM_RSSI:
976 * Hardware can do connection quality monitoring - i.e. it can monitor
977 * connection quality related parameters, such as the RSSI level, and
978 * provide notifications if configured trigger levels are reached.
979 *
957 */ 980 */
958enum ieee80211_hw_flags { 981enum ieee80211_hw_flags {
959 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, 982 IEEE80211_HW_HAS_RATE_CONTROL = 1<<0,
@@ -975,6 +998,8 @@ enum ieee80211_hw_flags {
975 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16, 998 IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS = 1<<16,
976 IEEE80211_HW_SUPPORTS_UAPSD = 1<<17, 999 IEEE80211_HW_SUPPORTS_UAPSD = 1<<17,
977 IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, 1000 IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18,
1001 IEEE80211_HW_CONNECTION_MONITOR = 1<<19,
1002 IEEE80211_HW_SUPPORTS_CQM_RSSI = 1<<20,
978}; 1003};
979 1004
980/** 1005/**
@@ -1606,7 +1631,7 @@ struct ieee80211_ops {
1606 struct ieee80211_bss_conf *info, 1631 struct ieee80211_bss_conf *info,
1607 u32 changed); 1632 u32 changed);
1608 u64 (*prepare_multicast)(struct ieee80211_hw *hw, 1633 u64 (*prepare_multicast)(struct ieee80211_hw *hw,
1609 int mc_count, struct dev_addr_list *mc_list); 1634 struct netdev_hw_addr_list *mc_list);
1610 void (*configure_filter)(struct ieee80211_hw *hw, 1635 void (*configure_filter)(struct ieee80211_hw *hw,
1611 unsigned int changed_flags, 1636 unsigned int changed_flags,
1612 unsigned int *total_flags, 1637 unsigned int *total_flags,
@@ -1802,7 +1827,10 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw);
1802 * ieee80211_rx - receive frame 1827 * ieee80211_rx - receive frame
1803 * 1828 *
1804 * Use this function to hand received frames to mac80211. The receive 1829 * Use this function to hand received frames to mac80211. The receive
1805 * buffer in @skb must start with an IEEE 802.11 header. 1830 * buffer in @skb must start with an IEEE 802.11 header. In case of a
1831 * paged @skb, it is recommended that the driver put the ieee80211
1832 * header of the frame on the linear part of the @skb to avoid memory
1833 * allocation and/or memcpy by the stack.
1806 * 1834 *
1807 * This function may not be called in IRQ context. Calls to this function 1835 * This function may not be called in IRQ context. Calls to this function
1808 * for a single hardware must be synchronized against each other. Calls to 1836 * for a single hardware must be synchronized against each other. Calls to
@@ -2364,12 +2392,42 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
2364 * 2392 *
2365 * @vif: &struct ieee80211_vif pointer from the add_interface callback. 2393 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2366 * 2394 *
2367 * When beacon filtering is enabled with IEEE80211_HW_BEACON_FILTERING and 2395 * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING and
2368 * IEEE80211_CONF_PS is set, the driver needs to inform whenever the 2396 * %IEEE80211_CONF_PS is set, the driver needs to inform whenever the
2369 * hardware is not receiving beacons with this function. 2397 * hardware is not receiving beacons with this function.
2370 */ 2398 */
2371void ieee80211_beacon_loss(struct ieee80211_vif *vif); 2399void ieee80211_beacon_loss(struct ieee80211_vif *vif);
2372 2400
2401/**
2402 * ieee80211_connection_loss - inform that the hardware has lost its connection to the AP
2403 *
2404 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2405 *
2406 * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING, and
2407 * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver
2408 * needs to inform if the connection to the AP has been lost.
2409 *
2410 * This function will cause immediate change to disassociated state,
2411 * without connection recovery attempts.
2412 */
2413void ieee80211_connection_loss(struct ieee80211_vif *vif);
2414
2415/**
2416 * ieee80211_cqm_rssi_notify - inform that a configured connection quality
2417 * monitoring rssi threshold was triggered
2418 *
2419 * @vif: &struct ieee80211_vif pointer from the add_interface callback.
2420 * @rssi_event: the RSSI trigger event type
2421 * @gfp: context flags
2422 *
2423 * When %IEEE80211_HW_SUPPORTS_CQM_RSSI is set and connection quality
2424 * monitoring is configured with an rssi threshold, the driver will inform
2425 * whenever the rssi level reaches the threshold.
2426 */
2427void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
2428 enum nl80211_cqm_rssi_threshold_event rssi_event,
2429 gfp_t gfp);
2430
2373/* Rate control API */ 2431/* Rate control API */
2374 2432
2375/** 2433/**
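
On the mac80211 side, a hedged sketch of a driver consuming the new CQM fields: program the threshold when BSS_CHANGED_CQM is signalled and report crossings with ieee80211_cqm_rssi_notify(); the mydrv_* names and the firmware helper are hypothetical:

    #include <net/mac80211.h>

    /* Hypothetical firmware hook. */
    void mydrv_fw_set_rssi_trigger(void *priv, s32 thold, u32 hyst);

    /* The driver's bss_info_changed callback. */
    static void mydrv_bss_info_changed(struct ieee80211_hw *hw,
                                       struct ieee80211_vif *vif,
                                       struct ieee80211_bss_conf *info,
                                       u32 changed)
    {
            if (changed & BSS_CHANGED_CQM)
                    mydrv_fw_set_rssi_trigger(hw->priv, info->cqm_rssi_thold,
                                              info->cqm_rssi_hyst);
    }

    /* Firmware event path, possibly in atomic context. */
    static void mydrv_fw_rssi_low(struct ieee80211_vif *vif)
    {
            ieee80211_cqm_rssi_notify(vif, NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
                                      GFP_ATOMIC);
    }
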
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 2764994c9136..ae07feec6446 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -59,15 +59,11 @@ struct netns_ipv4 {
59 atomic_t rt_genid; 59 atomic_t rt_genid;
60 60
61#ifdef CONFIG_IP_MROUTE 61#ifdef CONFIG_IP_MROUTE
62 struct sock *mroute_sk; 62#ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
63 struct mfc_cache **mfc_cache_array; 63 struct mr_table *mrt;
64 struct vif_device *vif_table; 64#else
65 int maxvif; 65 struct list_head mr_tables;
66 atomic_t cache_resolve_queue_len; 66 struct fib_rules_ops *mr_rules_ops;
67 int mroute_do_assert;
68 int mroute_do_pim;
69#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
70 int mroute_reg_vif_num;
71#endif 67#endif
72#endif 68#endif
73}; 69};
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index b6cdc33b39c1..9d4d87cc970e 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -12,7 +12,7 @@ struct qdisc_walker {
12 int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *); 12 int (*fn)(struct Qdisc *, unsigned long cl, struct qdisc_walker *);
13}; 13};
14 14
15#define QDISC_ALIGNTO 32 15#define QDISC_ALIGNTO 64
16#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1)) 16#define QDISC_ALIGN(len) (((len) + QDISC_ALIGNTO-1) & ~(QDISC_ALIGNTO-1))
17 17
18static inline void *qdisc_priv(struct Qdisc *q) 18static inline void *qdisc_priv(struct Qdisc *q)
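
With QDISC_ALIGNTO doubled to 64 above, QDISC_ALIGN() now rounds allocations up to the next 64-byte boundary (one full cache line on common hardware); a quick worked check of the macro arithmetic:

    /* QDISC_ALIGN(len) = (len + 63) & ~63 with QDISC_ALIGNTO == 64:
     *   QDISC_ALIGN(1)  = (1 + 63)  & ~63 = 64
     *   QDISC_ALIGN(64) = (64 + 63) & ~63 = 64
     *   QDISC_ALIGN(65) = (65 + 63) & ~63 = 128  (was 96 with the old 32)
     */
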
diff --git a/include/net/raw.h b/include/net/raw.h
index 6c14a656357a..43c57502659b 100644
--- a/include/net/raw.h
+++ b/include/net/raw.h
@@ -19,6 +19,7 @@
19 19
20 20
21#include <net/protocol.h> 21#include <net/protocol.h>
22#include <linux/icmp.h>
22 23
23extern struct proto raw_prot; 24extern struct proto raw_prot;
24 25
@@ -56,4 +57,16 @@ int raw_seq_open(struct inode *ino, struct file *file,
56void raw_hash_sk(struct sock *sk); 57void raw_hash_sk(struct sock *sk);
57void raw_unhash_sk(struct sock *sk); 58void raw_unhash_sk(struct sock *sk);
58 59
60struct raw_sock {
61 /* inet_sock has to be the first member */
62 struct inet_sock inet;
63 struct icmp_filter filter;
64 u32 ipmr_table;
65};
66
67static inline struct raw_sock *raw_sk(const struct sock *sk)
68{
69 return (struct raw_sock *)sk;
70}
71
59#endif /* _RAW_H */ 72#endif /* _RAW_H */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 67dc08eaaa45..03ca5d826757 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -73,6 +73,7 @@ struct Qdisc {
73 struct sk_buff_head q; 73 struct sk_buff_head q;
74 struct gnet_stats_basic_packed bstats; 74 struct gnet_stats_basic_packed bstats;
75 struct gnet_stats_queue qstats; 75 struct gnet_stats_queue qstats;
76 struct rcu_head rcu_head;
76}; 77};
77 78
78struct Qdisc_class_ops { 79struct Qdisc_class_ops {
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 78740ec57d5d..59151557406c 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -268,7 +268,7 @@ enum {
268#define SCTP_MIB_MAX __SCTP_MIB_MAX 268#define SCTP_MIB_MAX __SCTP_MIB_MAX
269struct sctp_mib { 269struct sctp_mib {
270 unsigned long mibs[SCTP_MIB_MAX]; 270 unsigned long mibs[SCTP_MIB_MAX];
271} __SNMP_MIB_ALIGN__; 271};
272 272
273 273
274/* Print debugging messages. */ 274/* Print debugging messages. */
diff --git a/include/net/snmp.h b/include/net/snmp.h
index 692ee0061dc4..884fdbb74b23 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -52,26 +52,11 @@ struct snmp_mib {
52 * count on the 20Gb/s + networks people expect in a few years time! 52 * count on the 20Gb/s + networks people expect in a few years time!
53 */ 53 */
54 54
55/*
56 * The rule for padding:
57 * Best is power of two because then the right structure can be found by a
58 * simple shift. The structure should be always cache line aligned.
59 * gcc needs n=alignto(cachelinesize, popcnt(sizeof(bla_mib))) shift/add
60 * instructions to emulate multiply in case it is not power-of-two.
61 * Currently n is always <=3 for all sizes so simple cache line alignment
62 * is enough.
63 *
64 * The best solution would be a global CPU local area , especially on 64
65 * and 128byte cacheline machine it makes a *lot* of sense -AK
66 */
67
68#define __SNMP_MIB_ALIGN__ ____cacheline_aligned
69
70/* IPstats */ 55/* IPstats */
71#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX 56#define IPSTATS_MIB_MAX __IPSTATS_MIB_MAX
72struct ipstats_mib { 57struct ipstats_mib {
73 unsigned long mibs[IPSTATS_MIB_MAX]; 58 unsigned long mibs[IPSTATS_MIB_MAX];
74} __SNMP_MIB_ALIGN__; 59};
75 60
76/* ICMP */ 61/* ICMP */
77#define ICMP_MIB_DUMMY __ICMP_MIB_MAX 62#define ICMP_MIB_DUMMY __ICMP_MIB_MAX
@@ -79,36 +64,36 @@ struct ipstats_mib {
79 64
80struct icmp_mib { 65struct icmp_mib {
81 unsigned long mibs[ICMP_MIB_MAX]; 66 unsigned long mibs[ICMP_MIB_MAX];
82} __SNMP_MIB_ALIGN__; 67};
83 68
84#define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX 69#define ICMPMSG_MIB_MAX __ICMPMSG_MIB_MAX
85struct icmpmsg_mib { 70struct icmpmsg_mib {
86 unsigned long mibs[ICMPMSG_MIB_MAX]; 71 unsigned long mibs[ICMPMSG_MIB_MAX];
87} __SNMP_MIB_ALIGN__; 72};
88 73
89/* ICMP6 (IPv6-ICMP) */ 74/* ICMP6 (IPv6-ICMP) */
90#define ICMP6_MIB_MAX __ICMP6_MIB_MAX 75#define ICMP6_MIB_MAX __ICMP6_MIB_MAX
91struct icmpv6_mib { 76struct icmpv6_mib {
92 unsigned long mibs[ICMP6_MIB_MAX]; 77 unsigned long mibs[ICMP6_MIB_MAX];
93} __SNMP_MIB_ALIGN__; 78};
94 79
95#define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX 80#define ICMP6MSG_MIB_MAX __ICMP6MSG_MIB_MAX
96struct icmpv6msg_mib { 81struct icmpv6msg_mib {
97 unsigned long mibs[ICMP6MSG_MIB_MAX]; 82 unsigned long mibs[ICMP6MSG_MIB_MAX];
98} __SNMP_MIB_ALIGN__; 83};
99 84
100 85
101/* TCP */ 86/* TCP */
102#define TCP_MIB_MAX __TCP_MIB_MAX 87#define TCP_MIB_MAX __TCP_MIB_MAX
103struct tcp_mib { 88struct tcp_mib {
104 unsigned long mibs[TCP_MIB_MAX]; 89 unsigned long mibs[TCP_MIB_MAX];
105} __SNMP_MIB_ALIGN__; 90};
106 91
107/* UDP */ 92/* UDP */
108#define UDP_MIB_MAX __UDP_MIB_MAX 93#define UDP_MIB_MAX __UDP_MIB_MAX
109struct udp_mib { 94struct udp_mib {
110 unsigned long mibs[UDP_MIB_MAX]; 95 unsigned long mibs[UDP_MIB_MAX];
111} __SNMP_MIB_ALIGN__; 96};
112 97
113/* Linux */ 98/* Linux */
114#define LINUX_MIB_MAX __LINUX_MIB_MAX 99#define LINUX_MIB_MAX __LINUX_MIB_MAX
diff --git a/include/net/sock.h b/include/net/sock.h
index b4603cd54fcd..8ab05146a447 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -262,7 +262,7 @@ struct sock {
262#ifdef CONFIG_XFRM 262#ifdef CONFIG_XFRM
263 struct xfrm_policy *sk_policy[2]; 263 struct xfrm_policy *sk_policy[2];
264#endif 264#endif
265 rwlock_t sk_dst_lock; 265 spinlock_t sk_dst_lock;
266 atomic_t sk_rmem_alloc; 266 atomic_t sk_rmem_alloc;
267 atomic_t sk_wmem_alloc; 267 atomic_t sk_wmem_alloc;
268 atomic_t sk_omem_alloc; 268 atomic_t sk_omem_alloc;
@@ -1160,6 +1160,10 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
1160 sk->sk_socket = sock; 1160 sk->sk_socket = sock;
1161} 1161}
1162 1162
1163static inline wait_queue_head_t *sk_sleep(struct sock *sk)
1164{
1165 return sk->sk_sleep;
1166}
1163/* Detach socket from process context. 1167/* Detach socket from process context.
1164 * Announce socket dead, detach it from wait queue and inode. 1168 * Announce socket dead, detach it from wait queue and inode.
1165 * Note that parent inode held reference count on this struct sock, 1169 * Note that parent inode held reference count on this struct sock,
@@ -1192,7 +1196,8 @@ extern unsigned long sock_i_ino(struct sock *sk);
1192static inline struct dst_entry * 1196static inline struct dst_entry *
1193__sk_dst_get(struct sock *sk) 1197__sk_dst_get(struct sock *sk)
1194{ 1198{
1195 return sk->sk_dst_cache; 1199 return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
1200 sock_owned_by_user(sk));
1196} 1201}
1197 1202
1198static inline struct dst_entry * 1203static inline struct dst_entry *
@@ -1200,50 +1205,62 @@ sk_dst_get(struct sock *sk)
1200{ 1205{
1201 struct dst_entry *dst; 1206 struct dst_entry *dst;
1202 1207
1203 read_lock(&sk->sk_dst_lock); 1208 rcu_read_lock();
1204 dst = sk->sk_dst_cache; 1209 dst = rcu_dereference(sk->sk_dst_cache);
1205 if (dst) 1210 if (dst)
1206 dst_hold(dst); 1211 dst_hold(dst);
1207 read_unlock(&sk->sk_dst_lock); 1212 rcu_read_unlock();
1208 return dst; 1213 return dst;
1209} 1214}
1210 1215
1216extern void sk_reset_txq(struct sock *sk);
1217
1218static inline void dst_negative_advice(struct sock *sk)
1219{
1220 struct dst_entry *ndst, *dst = __sk_dst_get(sk);
1221
1222 if (dst && dst->ops->negative_advice) {
1223 ndst = dst->ops->negative_advice(dst);
1224
1225 if (ndst != dst) {
1226 rcu_assign_pointer(sk->sk_dst_cache, ndst);
1227 sk_reset_txq(sk);
1228 }
1229 }
1230}
1231
1211static inline void 1232static inline void
1212__sk_dst_set(struct sock *sk, struct dst_entry *dst) 1233__sk_dst_set(struct sock *sk, struct dst_entry *dst)
1213{ 1234{
1214 struct dst_entry *old_dst; 1235 struct dst_entry *old_dst;
1215 1236
1216 sk_tx_queue_clear(sk); 1237 sk_tx_queue_clear(sk);
1217 old_dst = sk->sk_dst_cache; 1238 old_dst = rcu_dereference_check(sk->sk_dst_cache,
1218 sk->sk_dst_cache = dst; 1239 lockdep_is_held(&sk->sk_dst_lock));
1240 rcu_assign_pointer(sk->sk_dst_cache, dst);
1219 dst_release(old_dst); 1241 dst_release(old_dst);
1220} 1242}
1221 1243
1222static inline void 1244static inline void
1223sk_dst_set(struct sock *sk, struct dst_entry *dst) 1245sk_dst_set(struct sock *sk, struct dst_entry *dst)
1224{ 1246{
1225 write_lock(&sk->sk_dst_lock); 1247 spin_lock(&sk->sk_dst_lock);
1226 __sk_dst_set(sk, dst); 1248 __sk_dst_set(sk, dst);
1227 write_unlock(&sk->sk_dst_lock); 1249 spin_unlock(&sk->sk_dst_lock);
1228} 1250}
1229 1251
1230static inline void 1252static inline void
1231__sk_dst_reset(struct sock *sk) 1253__sk_dst_reset(struct sock *sk)
1232{ 1254{
1233 struct dst_entry *old_dst; 1255 __sk_dst_set(sk, NULL);
1234
1235 sk_tx_queue_clear(sk);
1236 old_dst = sk->sk_dst_cache;
1237 sk->sk_dst_cache = NULL;
1238 dst_release(old_dst);
1239} 1256}
1240 1257
1241static inline void 1258static inline void
1242sk_dst_reset(struct sock *sk) 1259sk_dst_reset(struct sock *sk)
1243{ 1260{
1244 write_lock(&sk->sk_dst_lock); 1261 spin_lock(&sk->sk_dst_lock);
1245 __sk_dst_reset(sk); 1262 __sk_dst_reset(sk);
1246 write_unlock(&sk->sk_dst_lock); 1263 spin_unlock(&sk->sk_dst_lock);
1247} 1264}
1248 1265
1249extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); 1266extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
@@ -1333,8 +1350,8 @@ static inline int sk_has_allocations(const struct sock *sk)
1333 * tp->rcv_nxt check sock_def_readable 1350 * tp->rcv_nxt check sock_def_readable
1334 * ... { 1351 * ... {
1335 * schedule ... 1352 * schedule ...
1336 * if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 1353 * if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
1337 * wake_up_interruptible(sk->sk_sleep) 1354 * wake_up_interruptible(sk_sleep(sk))
1338 * ... 1355 * ...
1339 * } 1356 * }
1340 * 1357 *
@@ -1355,7 +1372,7 @@ static inline int sk_has_sleeper(struct sock *sk)
1355 * This memory barrier is paired in the sock_poll_wait. 1372 * This memory barrier is paired in the sock_poll_wait.
1356 */ 1373 */
1357 smp_mb__after_lock(); 1374 smp_mb__after_lock();
1358 return sk->sk_sleep && waitqueue_active(sk->sk_sleep); 1375 return sk_sleep(sk) && waitqueue_active(sk_sleep(sk));
1359} 1376}
1360 1377
1361/** 1378/**
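
A hedged sketch of the converted accessors in use, relying only on the inlines above: sk_dst_get() is now lockless on the read side, and wait-queue users go through sk_sleep() instead of dereferencing sk->sk_sleep; the example functions are hypothetical:

    #include <net/dst.h>
    #include <net/sock.h>

    static u32 example_path_mtu(struct sock *sk)
    {
            struct dst_entry *dst = sk_dst_get(sk); /* takes its own ref */
            u32 mtu = 0;

            if (dst) {
                    mtu = dst_mtu(dst);
                    dst_release(dst);
            }
            return mtu;
    }

    static void example_wake_readers(struct sock *sk)
    {
            if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
                    wake_up_interruptible(sk_sleep(sk));
    }
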
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 75be5a28815d..b7d83d204a93 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -423,7 +423,7 @@ extern u8 *tcp_parse_md5sig_option(struct tcphdr *th);
423 * TCP v4 functions exported for the inet6 API 423 * TCP v4 functions exported for the inet6 API
424 */ 424 */
425 425
426extern void tcp_v4_send_check(struct sock *sk, int len, 426extern void tcp_v4_send_check(struct sock *sk,
427 struct sk_buff *skb); 427 struct sk_buff *skb);
428 428
429extern int tcp_v4_conn_request(struct sock *sk, 429extern int tcp_v4_conn_request(struct sock *sk,
@@ -939,7 +939,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
939 939
940 tp->ucopy.memory = 0; 940 tp->ucopy.memory = 0;
941 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { 941 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
942 wake_up_interruptible_sync_poll(sk->sk_sleep, 942 wake_up_interruptible_sync_poll(sk_sleep(sk),
943 POLLIN | POLLRDNORM | POLLRDBAND); 943 POLLIN | POLLRDNORM | POLLRDBAND);
944 if (!inet_csk_ack_scheduled(sk)) 944 if (!inet_csk_ack_scheduled(sk))
945 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, 945 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index ac52f33f3e4a..1913af67c43d 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -20,6 +20,7 @@
20#include <net/route.h> 20#include <net/route.h>
21#include <net/ipv6.h> 21#include <net/ipv6.h>
22#include <net/ip6_fib.h> 22#include <net/ip6_fib.h>
23#include <net/flow.h>
23 24
24#include <linux/interrupt.h> 25#include <linux/interrupt.h>
25 26
@@ -267,7 +268,6 @@ struct xfrm_policy_afinfo {
267 xfrm_address_t *saddr, 268 xfrm_address_t *saddr,
268 xfrm_address_t *daddr); 269 xfrm_address_t *daddr);
269 int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr); 270 int (*get_saddr)(struct net *net, xfrm_address_t *saddr, xfrm_address_t *daddr);
270 struct dst_entry *(*find_bundle)(struct flowi *fl, struct xfrm_policy *policy);
271 void (*decode_session)(struct sk_buff *skb, 271 void (*decode_session)(struct sk_buff *skb,
272 struct flowi *fl, 272 struct flowi *fl,
273 int reverse); 273 int reverse);
@@ -482,13 +482,14 @@ struct xfrm_policy {
482 atomic_t refcnt; 482 atomic_t refcnt;
483 struct timer_list timer; 483 struct timer_list timer;
484 484
485 struct flow_cache_object flo;
486 atomic_t genid;
485 u32 priority; 487 u32 priority;
486 u32 index; 488 u32 index;
487 struct xfrm_mark mark; 489 struct xfrm_mark mark;
488 struct xfrm_selector selector; 490 struct xfrm_selector selector;
489 struct xfrm_lifetime_cfg lft; 491 struct xfrm_lifetime_cfg lft;
490 struct xfrm_lifetime_cur curlft; 492 struct xfrm_lifetime_cur curlft;
491 struct dst_entry *bundles;
492 struct xfrm_policy_walk_entry walk; 493 struct xfrm_policy_walk_entry walk;
493 u8 type; 494 u8 type;
494 u8 action; 495 u8 action;
@@ -735,19 +736,12 @@ static inline void xfrm_pol_put(struct xfrm_policy *policy)
735 xfrm_policy_destroy(policy); 736 xfrm_policy_destroy(policy);
736} 737}
737 738
738#ifdef CONFIG_XFRM_SUB_POLICY
739static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols) 739static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
740{ 740{
741 int i; 741 int i;
742 for (i = npols - 1; i >= 0; --i) 742 for (i = npols - 1; i >= 0; --i)
743 xfrm_pol_put(pols[i]); 743 xfrm_pol_put(pols[i]);
744} 744}
745#else
746static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
747{
748 xfrm_pol_put(pols[0]);
749}
750#endif
751 745
752extern void __xfrm_state_destroy(struct xfrm_state *); 746extern void __xfrm_state_destroy(struct xfrm_state *);
753 747
@@ -878,11 +872,15 @@ struct xfrm_dst {
878 struct rt6_info rt6; 872 struct rt6_info rt6;
879 } u; 873 } u;
880 struct dst_entry *route; 874 struct dst_entry *route;
875 struct flow_cache_object flo;
876 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
877 int num_pols, num_xfrms;
881#ifdef CONFIG_XFRM_SUB_POLICY 878#ifdef CONFIG_XFRM_SUB_POLICY
882 struct flowi *origin; 879 struct flowi *origin;
883 struct xfrm_selector *partner; 880 struct xfrm_selector *partner;
884#endif 881#endif
885 u32 genid; 882 u32 xfrm_genid;
883 u32 policy_genid;
886 u32 route_mtu_cached; 884 u32 route_mtu_cached;
887 u32 child_mtu_cached; 885 u32 child_mtu_cached;
888 u32 route_cookie; 886 u32 route_cookie;
@@ -892,6 +890,7 @@ struct xfrm_dst {
892#ifdef CONFIG_XFRM 890#ifdef CONFIG_XFRM
893static inline void xfrm_dst_destroy(struct xfrm_dst *xdst) 891static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
894{ 892{
893 xfrm_pols_put(xdst->pols, xdst->num_pols);
895 dst_release(xdst->route); 894 dst_release(xdst->route);
896 if (likely(xdst->u.dst.xfrm)) 895 if (likely(xdst->u.dst.xfrm))
897 xfrm_state_put(xdst->u.dst.xfrm); 896 xfrm_state_put(xdst->u.dst.xfrm);
diff --git a/net/802/garp.c b/net/802/garp.c
index 9ed7c0e7dc17..941f2a324d3a 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -576,7 +576,7 @@ int garp_init_applicant(struct net_device *dev, struct garp_application *appl)
576 if (!app) 576 if (!app)
577 goto err2; 577 goto err2;
578 578
579 err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0); 579 err = dev_mc_add(dev, appl->proto.group_address);
580 if (err < 0) 580 if (err < 0)
581 goto err3; 581 goto err3;
582 582
@@ -616,7 +616,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
616 garp_pdu_queue(app); 616 garp_pdu_queue(app);
617 garp_queue_xmit(app); 617 garp_queue_xmit(app);
618 618
619 dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0); 619 dev_mc_del(dev, appl->proto.group_address);
620 kfree(app); 620 kfree(app);
621 garp_release_port(dev); 621 garp_release_port(dev);
622} 622}
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 97da977c2a23..3c1c8c14e929 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -357,13 +357,13 @@ static void vlan_sync_address(struct net_device *dev,
357 * the new address */ 357 * the new address */
358 if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && 358 if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
359 !compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) 359 !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
360 dev_unicast_delete(dev, vlandev->dev_addr); 360 dev_uc_del(dev, vlandev->dev_addr);
361 361
362 /* vlan address was equal to the old address and is different from 362 /* vlan address was equal to the old address and is different from
363 * the new address */ 363 * the new address */
364 if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) && 364 if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
365 compare_ether_addr(vlandev->dev_addr, dev->dev_addr)) 365 compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
366 dev_unicast_add(dev, vlandev->dev_addr); 366 dev_uc_add(dev, vlandev->dev_addr);
367 367
368 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN); 368 memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
369} 369}
@@ -533,6 +533,10 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
533 } 533 }
534 unregister_netdevice_many(&list); 534 unregister_netdevice_many(&list);
535 break; 535 break;
536
537 case NETDEV_PRE_TYPE_CHANGE:
538 /* Forbid the underlying device to change its type. */
539 return NOTIFY_BAD;
536 } 540 }
537 541
538out: 542out:
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 29b6348c8d4d..b5249c5fd4d3 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -470,7 +470,7 @@ static int vlan_dev_open(struct net_device *dev)
470 return -ENETDOWN; 470 return -ENETDOWN;
471 471
472 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) { 472 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
473 err = dev_unicast_add(real_dev, dev->dev_addr); 473 err = dev_uc_add(real_dev, dev->dev_addr);
474 if (err < 0) 474 if (err < 0)
475 goto out; 475 goto out;
476 } 476 }
@@ -499,7 +499,7 @@ clear_allmulti:
499 dev_set_allmulti(real_dev, -1); 499 dev_set_allmulti(real_dev, -1);
500del_unicast: 500del_unicast:
501 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 501 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
502 dev_unicast_delete(real_dev, dev->dev_addr); 502 dev_uc_del(real_dev, dev->dev_addr);
503out: 503out:
504 netif_carrier_off(dev); 504 netif_carrier_off(dev);
505 return err; 505 return err;
@@ -514,14 +514,14 @@ static int vlan_dev_stop(struct net_device *dev)
514 vlan_gvrp_request_leave(dev); 514 vlan_gvrp_request_leave(dev);
515 515
516 dev_mc_unsync(real_dev, dev); 516 dev_mc_unsync(real_dev, dev);
517 dev_unicast_unsync(real_dev, dev); 517 dev_uc_unsync(real_dev, dev);
518 if (dev->flags & IFF_ALLMULTI) 518 if (dev->flags & IFF_ALLMULTI)
519 dev_set_allmulti(real_dev, -1); 519 dev_set_allmulti(real_dev, -1);
520 if (dev->flags & IFF_PROMISC) 520 if (dev->flags & IFF_PROMISC)
521 dev_set_promiscuity(real_dev, -1); 521 dev_set_promiscuity(real_dev, -1);
522 522
523 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 523 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
524 dev_unicast_delete(real_dev, dev->dev_addr); 524 dev_uc_del(real_dev, dev->dev_addr);
525 525
526 netif_carrier_off(dev); 526 netif_carrier_off(dev);
527 return 0; 527 return 0;
@@ -540,13 +540,13 @@ static int vlan_dev_set_mac_address(struct net_device *dev, void *p)
540 goto out; 540 goto out;
541 541
542 if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) { 542 if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
543 err = dev_unicast_add(real_dev, addr->sa_data); 543 err = dev_uc_add(real_dev, addr->sa_data);
544 if (err < 0) 544 if (err < 0)
545 return err; 545 return err;
546 } 546 }
547 547
548 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) 548 if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
549 dev_unicast_delete(real_dev, dev->dev_addr); 549 dev_uc_del(real_dev, dev->dev_addr);
550 550
551out: 551out:
552 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 552 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@ -663,7 +663,7 @@ static void vlan_dev_change_rx_flags(struct net_device *dev, int change)
663static void vlan_dev_set_rx_mode(struct net_device *vlan_dev) 663static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
664{ 664{
665 dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); 665 dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
666 dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev); 666 dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
667} 667}
668 668
669/* 669/*
diff --git a/net/Kconfig b/net/Kconfig
index 041c35edb763..0d68b40fc0e6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -186,6 +186,7 @@ source "net/sctp/Kconfig"
186source "net/rds/Kconfig" 186source "net/rds/Kconfig"
187source "net/tipc/Kconfig" 187source "net/tipc/Kconfig"
188source "net/atm/Kconfig" 188source "net/atm/Kconfig"
189source "net/l2tp/Kconfig"
189source "net/802/Kconfig" 190source "net/802/Kconfig"
190source "net/bridge/Kconfig" 191source "net/bridge/Kconfig"
191source "net/dsa/Kconfig" 192source "net/dsa/Kconfig"
@@ -203,6 +204,11 @@ source "net/ieee802154/Kconfig"
203source "net/sched/Kconfig" 204source "net/sched/Kconfig"
204source "net/dcb/Kconfig" 205source "net/dcb/Kconfig"
205 206
207config RPS
208 boolean
209 depends on SMP && SYSFS
210 default y
211
206menu "Network testing" 212menu "Network testing"
207 213
208config NET_PKTGEN 214config NET_PKTGEN
@@ -275,5 +281,7 @@ source "net/wimax/Kconfig"
275 281
276source "net/rfkill/Kconfig" 282source "net/rfkill/Kconfig"
277source "net/9p/Kconfig" 283source "net/9p/Kconfig"
284source "net/caif/Kconfig"
285
278 286
279endif # if NET 287endif # if NET
diff --git a/net/Makefile b/net/Makefile
index 1542e7268a7b..cb7bdc1210cb 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_BT) += bluetooth/
40obj-$(CONFIG_SUNRPC) += sunrpc/ 40obj-$(CONFIG_SUNRPC) += sunrpc/
41obj-$(CONFIG_AF_RXRPC) += rxrpc/ 41obj-$(CONFIG_AF_RXRPC) += rxrpc/
42obj-$(CONFIG_ATM) += atm/ 42obj-$(CONFIG_ATM) += atm/
43obj-$(CONFIG_L2TP) += l2tp/
43obj-$(CONFIG_DECNET) += decnet/ 44obj-$(CONFIG_DECNET) += decnet/
44obj-$(CONFIG_ECONET) += econet/ 45obj-$(CONFIG_ECONET) += econet/
45obj-$(CONFIG_PHONET) += phonet/ 46obj-$(CONFIG_PHONET) += phonet/
@@ -56,6 +57,7 @@ obj-$(CONFIG_NETLABEL) += netlabel/
56obj-$(CONFIG_IUCV) += iucv/ 57obj-$(CONFIG_IUCV) += iucv/
57obj-$(CONFIG_RFKILL) += rfkill/ 58obj-$(CONFIG_RFKILL) += rfkill/
58obj-$(CONFIG_NET_9P) += 9p/ 59obj-$(CONFIG_NET_9P) += 9p/
60obj-$(CONFIG_CAIF) += caif/
59ifneq ($(CONFIG_DCB),) 61ifneq ($(CONFIG_DCB),)
60obj-y += dcb/ 62obj-y += dcb/
61endif 63endif
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 7b02967fbbe7..c410b93fda2e 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -782,7 +782,7 @@ static int atif_ioctl(int cmd, void __user *arg)
782 atrtr_create(&rtdef, dev); 782 atrtr_create(&rtdef, dev);
783 } 783 }
784 } 784 }
785 dev_mc_add(dev, aarp_mcast, 6, 1); 785 dev_mc_add_global(dev, aarp_mcast);
786 return 0; 786 return 0;
787 787
788 case SIOCGIFADDR: 788 case SIOCGIFADDR:
diff --git a/net/atm/common.c b/net/atm/common.c
index 97ed94aa0cbc..e3e10e6f8628 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -92,7 +92,7 @@ static void vcc_def_wakeup(struct sock *sk)
92{ 92{
93 read_lock(&sk->sk_callback_lock); 93 read_lock(&sk->sk_callback_lock);
94 if (sk_has_sleeper(sk)) 94 if (sk_has_sleeper(sk))
95 wake_up(sk->sk_sleep); 95 wake_up(sk_sleep(sk));
96 read_unlock(&sk->sk_callback_lock); 96 read_unlock(&sk->sk_callback_lock);
97} 97}
98 98
@@ -110,7 +110,7 @@ static void vcc_write_space(struct sock *sk)
110 110
111 if (vcc_writable(sk)) { 111 if (vcc_writable(sk)) {
112 if (sk_has_sleeper(sk)) 112 if (sk_has_sleeper(sk))
113 wake_up_interruptible(sk->sk_sleep); 113 wake_up_interruptible(sk_sleep(sk));
114 114
115 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 115 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
116 } 116 }
@@ -549,7 +549,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
549 } 549 }
550 550
551 eff = (size+3) & ~3; /* align to word boundary */ 551 eff = (size+3) & ~3; /* align to word boundary */
552 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 552 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
553 error = 0; 553 error = 0;
554 while (!(skb = alloc_tx(vcc, eff))) { 554 while (!(skb = alloc_tx(vcc, eff))) {
555 if (m->msg_flags & MSG_DONTWAIT) { 555 if (m->msg_flags & MSG_DONTWAIT) {
@@ -568,9 +568,9 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
568 send_sig(SIGPIPE, current, 0); 568 send_sig(SIGPIPE, current, 0);
569 break; 569 break;
570 } 570 }
571 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 571 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
572 } 572 }
573 finish_wait(sk->sk_sleep, &wait); 573 finish_wait(sk_sleep(sk), &wait);
574 if (error) 574 if (error)
575 goto out; 575 goto out;
576 skb->dev = NULL; /* for paths shared with net_device interfaces */ 576 skb->dev = NULL; /* for paths shared with net_device interfaces */
@@ -595,7 +595,7 @@ unsigned int vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
595 struct atm_vcc *vcc; 595 struct atm_vcc *vcc;
596 unsigned int mask; 596 unsigned int mask;
597 597
598 sock_poll_wait(file, sk->sk_sleep, wait); 598 sock_poll_wait(file, sk_sleep(sk), wait);
599 mask = 0; 599 mask = 0;
600 600
601 vcc = ATM_SD(sock); 601 vcc = ATM_SD(sock);
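
The hunks above are part of a tree-wide conversion from open-coded
sk->sk_sleep dereferences to the sk_sleep() accessor, which decouples
callers from where the wait queue head actually lives. A minimal sketch of
the accessor as assumed by this series (the wait queue is still embedded in
struct sock at this point):

	/* Sketch: single indirection point for the socket wait queue. */
	static inline wait_queue_head_t *sk_sleep(struct sock *sk)
	{
		return sk->sk_sleep;
	}

Once every caller goes through the helper, the queue can later be relocated
without touching the call sites again.
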
diff --git a/net/atm/proc.c b/net/atm/proc.c
index 696e218436e5..6262aeae398e 100644
--- a/net/atm/proc.c
+++ b/net/atm/proc.c
@@ -407,7 +407,6 @@ EXPORT_SYMBOL(atm_proc_root);
407 407
408int atm_proc_dev_register(struct atm_dev *dev) 408int atm_proc_dev_register(struct atm_dev *dev)
409{ 409{
410 int digits, num;
411 int error; 410 int error;
412 411
413 /* No proc info */ 412 /* No proc info */
@@ -415,16 +414,9 @@ int atm_proc_dev_register(struct atm_dev *dev)
415 return 0; 414 return 0;
416 415
417 error = -ENOMEM; 416 error = -ENOMEM;
418 digits = 0; 417 dev->proc_name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
419 for (num = dev->number; num; num /= 10)
420 digits++;
421 if (!digits)
422 digits++;
423
424 dev->proc_name = kmalloc(strlen(dev->type) + digits + 2, GFP_KERNEL);
425 if (!dev->proc_name) 418 if (!dev->proc_name)
426 goto err_out; 419 goto err_out;
427 sprintf(dev->proc_name, "%s:%d", dev->type, dev->number);
428 420
429 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root, 421 dev->proc_entry = proc_create_data(dev->proc_name, 0, atm_proc_root,
430 &proc_atm_dev_ops, dev); 422 &proc_atm_dev_ops, dev);
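
The replacement above swaps a hand-rolled digit count plus kmalloc() and
sprintf() for kasprintf(), which sizes, allocates and formats in one call.
A minimal sketch of the idiom (the caller still owns and must kfree() the
result):

	#include <linux/slab.h>

	/* kasprintf() returns a kfree()-able buffer, or NULL on OOM. */
	char *name = kasprintf(GFP_KERNEL, "%s:%d", dev->type, dev->number);
	if (!name)
		return -ENOMEM;
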
diff --git a/net/atm/signaling.c b/net/atm/signaling.c
index 6ba6e466ee54..509c8ac02b63 100644
--- a/net/atm/signaling.c
+++ b/net/atm/signaling.c
@@ -131,7 +131,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
131 } 131 }
132 sk->sk_ack_backlog++; 132 sk->sk_ack_backlog++;
133 skb_queue_tail(&sk->sk_receive_queue, skb); 133 skb_queue_tail(&sk->sk_receive_queue, skb);
134 pr_debug("waking sk->sk_sleep 0x%p\n", sk->sk_sleep); 134 pr_debug("waking sk_sleep(sk) 0x%p\n", sk_sleep(sk));
135 sk->sk_state_change(sk); 135 sk->sk_state_change(sk);
136as_indicate_complete: 136as_indicate_complete:
137 release_sock(sk); 137 release_sock(sk);
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 3ba9a45a51ac..754ee4791d96 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -49,14 +49,14 @@ static void svc_disconnect(struct atm_vcc *vcc)
49 49
50 pr_debug("%p\n", vcc); 50 pr_debug("%p\n", vcc);
51 if (test_bit(ATM_VF_REGIS, &vcc->flags)) { 51 if (test_bit(ATM_VF_REGIS, &vcc->flags)) {
52 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 52 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
53 sigd_enq(vcc, as_close, NULL, NULL, NULL); 53 sigd_enq(vcc, as_close, NULL, NULL, NULL);
54 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { 54 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
55 schedule(); 55 schedule();
56 prepare_to_wait(sk->sk_sleep, &wait, 56 prepare_to_wait(sk_sleep(sk), &wait,
57 TASK_UNINTERRUPTIBLE); 57 TASK_UNINTERRUPTIBLE);
58 } 58 }
59 finish_wait(sk->sk_sleep, &wait); 59 finish_wait(sk_sleep(sk), &wait);
60 } 60 }
61 /* beware - socket is still in use by atmsigd until the last 61 /* beware - socket is still in use by atmsigd until the last
62 as_indicate has been answered */ 62 as_indicate has been answered */
@@ -125,13 +125,13 @@ static int svc_bind(struct socket *sock, struct sockaddr *sockaddr,
125 } 125 }
126 vcc->local = *addr; 126 vcc->local = *addr;
127 set_bit(ATM_VF_WAITING, &vcc->flags); 127 set_bit(ATM_VF_WAITING, &vcc->flags);
128 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 128 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
129 sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local); 129 sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local);
130 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 130 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
131 schedule(); 131 schedule();
132 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 132 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
133 } 133 }
134 finish_wait(sk->sk_sleep, &wait); 134 finish_wait(sk_sleep(sk), &wait);
135 clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */ 135 clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */
136 if (!sigd) { 136 if (!sigd) {
137 error = -EUNATCH; 137 error = -EUNATCH;
@@ -201,10 +201,10 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
201 } 201 }
202 vcc->remote = *addr; 202 vcc->remote = *addr;
203 set_bit(ATM_VF_WAITING, &vcc->flags); 203 set_bit(ATM_VF_WAITING, &vcc->flags);
204 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 204 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
205 sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote); 205 sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote);
206 if (flags & O_NONBLOCK) { 206 if (flags & O_NONBLOCK) {
207 finish_wait(sk->sk_sleep, &wait); 207 finish_wait(sk_sleep(sk), &wait);
208 sock->state = SS_CONNECTING; 208 sock->state = SS_CONNECTING;
209 error = -EINPROGRESS; 209 error = -EINPROGRESS;
210 goto out; 210 goto out;
@@ -213,7 +213,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
213 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 213 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
214 schedule(); 214 schedule();
215 if (!signal_pending(current)) { 215 if (!signal_pending(current)) {
216 prepare_to_wait(sk->sk_sleep, &wait, 216 prepare_to_wait(sk_sleep(sk), &wait,
217 TASK_INTERRUPTIBLE); 217 TASK_INTERRUPTIBLE);
218 continue; 218 continue;
219 } 219 }
@@ -232,14 +232,14 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
232 */ 232 */
233 sigd_enq(vcc, as_close, NULL, NULL, NULL); 233 sigd_enq(vcc, as_close, NULL, NULL, NULL);
234 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 234 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
235 prepare_to_wait(sk->sk_sleep, &wait, 235 prepare_to_wait(sk_sleep(sk), &wait,
236 TASK_INTERRUPTIBLE); 236 TASK_INTERRUPTIBLE);
237 schedule(); 237 schedule();
238 } 238 }
239 if (!sk->sk_err) 239 if (!sk->sk_err)
240 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && 240 while (!test_bit(ATM_VF_RELEASED, &vcc->flags) &&
241 sigd) { 241 sigd) {
242 prepare_to_wait(sk->sk_sleep, &wait, 242 prepare_to_wait(sk_sleep(sk), &wait,
243 TASK_INTERRUPTIBLE); 243 TASK_INTERRUPTIBLE);
244 schedule(); 244 schedule();
245 } 245 }
@@ -250,7 +250,7 @@ static int svc_connect(struct socket *sock, struct sockaddr *sockaddr,
250 error = -EINTR; 250 error = -EINTR;
251 break; 251 break;
252 } 252 }
253 finish_wait(sk->sk_sleep, &wait); 253 finish_wait(sk_sleep(sk), &wait);
254 if (error) 254 if (error)
255 goto out; 255 goto out;
256 if (!sigd) { 256 if (!sigd) {
@@ -302,13 +302,13 @@ static int svc_listen(struct socket *sock, int backlog)
302 goto out; 302 goto out;
303 } 303 }
304 set_bit(ATM_VF_WAITING, &vcc->flags); 304 set_bit(ATM_VF_WAITING, &vcc->flags);
305 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 305 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
306 sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local); 306 sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local);
307 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 307 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
308 schedule(); 308 schedule();
309 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 309 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
310 } 310 }
311 finish_wait(sk->sk_sleep, &wait); 311 finish_wait(sk_sleep(sk), &wait);
312 if (!sigd) { 312 if (!sigd) {
313 error = -EUNATCH; 313 error = -EUNATCH;
314 goto out; 314 goto out;
@@ -343,7 +343,7 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
343 while (1) { 343 while (1) {
344 DEFINE_WAIT(wait); 344 DEFINE_WAIT(wait);
345 345
346 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 346 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
347 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && 347 while (!(skb = skb_dequeue(&sk->sk_receive_queue)) &&
348 sigd) { 348 sigd) {
349 if (test_bit(ATM_VF_RELEASED, &old_vcc->flags)) 349 if (test_bit(ATM_VF_RELEASED, &old_vcc->flags))
@@ -363,10 +363,10 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
363 error = -ERESTARTSYS; 363 error = -ERESTARTSYS;
364 break; 364 break;
365 } 365 }
366 prepare_to_wait(sk->sk_sleep, &wait, 366 prepare_to_wait(sk_sleep(sk), &wait,
367 TASK_INTERRUPTIBLE); 367 TASK_INTERRUPTIBLE);
368 } 368 }
369 finish_wait(sk->sk_sleep, &wait); 369 finish_wait(sk_sleep(sk), &wait);
370 if (error) 370 if (error)
371 goto out; 371 goto out;
372 if (!skb) { 372 if (!skb) {
@@ -392,17 +392,17 @@ static int svc_accept(struct socket *sock, struct socket *newsock, int flags)
392 } 392 }
393 /* wait should be short, so we ignore the non-blocking flag */ 393 /* wait should be short, so we ignore the non-blocking flag */
394 set_bit(ATM_VF_WAITING, &new_vcc->flags); 394 set_bit(ATM_VF_WAITING, &new_vcc->flags);
395 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, 395 prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
396 TASK_UNINTERRUPTIBLE); 396 TASK_UNINTERRUPTIBLE);
397 sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL); 397 sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL);
398 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { 398 while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) {
399 release_sock(sk); 399 release_sock(sk);
400 schedule(); 400 schedule();
401 lock_sock(sk); 401 lock_sock(sk);
402 prepare_to_wait(sk_atm(new_vcc)->sk_sleep, &wait, 402 prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait,
403 TASK_UNINTERRUPTIBLE); 403 TASK_UNINTERRUPTIBLE);
404 } 404 }
405 finish_wait(sk_atm(new_vcc)->sk_sleep, &wait); 405 finish_wait(sk_sleep(sk_atm(new_vcc)), &wait);
406 if (!sigd) { 406 if (!sigd) {
407 error = -EUNATCH; 407 error = -EUNATCH;
408 goto out; 408 goto out;
@@ -438,14 +438,14 @@ int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos)
438 DEFINE_WAIT(wait); 438 DEFINE_WAIT(wait);
439 439
440 set_bit(ATM_VF_WAITING, &vcc->flags); 440 set_bit(ATM_VF_WAITING, &vcc->flags);
441 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 441 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
442 sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0); 442 sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0);
443 while (test_bit(ATM_VF_WAITING, &vcc->flags) && 443 while (test_bit(ATM_VF_WAITING, &vcc->flags) &&
444 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { 444 !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) {
445 schedule(); 445 schedule();
446 prepare_to_wait(sk->sk_sleep, &wait, TASK_UNINTERRUPTIBLE); 446 prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE);
447 } 447 }
448 finish_wait(sk->sk_sleep, &wait); 448 finish_wait(sk_sleep(sk), &wait);
449 if (!sigd) 449 if (!sigd)
450 return -EUNATCH; 450 return -EUNATCH;
451 return -sk->sk_err; 451 return -sk->sk_err;
@@ -534,20 +534,20 @@ static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr,
534 534
535 lock_sock(sk); 535 lock_sock(sk);
536 set_bit(ATM_VF_WAITING, &vcc->flags); 536 set_bit(ATM_VF_WAITING, &vcc->flags);
537 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 537 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
538 sigd_enq(vcc, as_addparty, NULL, NULL, 538 sigd_enq(vcc, as_addparty, NULL, NULL,
539 (struct sockaddr_atmsvc *) sockaddr); 539 (struct sockaddr_atmsvc *) sockaddr);
540 if (flags & O_NONBLOCK) { 540 if (flags & O_NONBLOCK) {
541 finish_wait(sk->sk_sleep, &wait); 541 finish_wait(sk_sleep(sk), &wait);
542 error = -EINPROGRESS; 542 error = -EINPROGRESS;
543 goto out; 543 goto out;
544 } 544 }
545 pr_debug("added wait queue\n"); 545 pr_debug("added wait queue\n");
546 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 546 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
547 schedule(); 547 schedule();
548 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 548 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
549 } 549 }
550 finish_wait(sk->sk_sleep, &wait); 550 finish_wait(sk_sleep(sk), &wait);
551 error = xchg(&sk->sk_err_soft, 0); 551 error = xchg(&sk->sk_err_soft, 0);
552out: 552out:
553 release_sock(sk); 553 release_sock(sk);
@@ -563,13 +563,13 @@ static int svc_dropparty(struct socket *sock, int ep_ref)
563 563
564 lock_sock(sk); 564 lock_sock(sk);
565 set_bit(ATM_VF_WAITING, &vcc->flags); 565 set_bit(ATM_VF_WAITING, &vcc->flags);
566 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 566 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
567 sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref); 567 sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref);
568 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { 568 while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) {
569 schedule(); 569 schedule();
570 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 570 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
571 } 571 }
572 finish_wait(sk->sk_sleep, &wait); 572 finish_wait(sk_sleep(sk), &wait);
573 if (!sigd) { 573 if (!sigd) {
574 error = -EUNATCH; 574 error = -EUNATCH;
575 goto out; 575 goto out;
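
Every svc_* wait above follows the same prepare_to_wait() / schedule() /
finish_wait() idiom, now expressed through sk_sleep(). A stripped-down
sketch of the loop, where condition() is a hypothetical predicate:

	DEFINE_WAIT(wait);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	while (!condition()) {			/* condition() is hypothetical */
		if (signal_pending(current))
			break;			/* caller maps this to -ERESTARTSYS */
		schedule();
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);
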
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 65c5801261f9..cfdfd7e2a172 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1281,7 +1281,7 @@ static int __must_check ax25_connect(struct socket *sock,
1281 DEFINE_WAIT(wait); 1281 DEFINE_WAIT(wait);
1282 1282
1283 for (;;) { 1283 for (;;) {
1284 prepare_to_wait(sk->sk_sleep, &wait, 1284 prepare_to_wait(sk_sleep(sk), &wait,
1285 TASK_INTERRUPTIBLE); 1285 TASK_INTERRUPTIBLE);
1286 if (sk->sk_state != TCP_SYN_SENT) 1286 if (sk->sk_state != TCP_SYN_SENT)
1287 break; 1287 break;
@@ -1294,7 +1294,7 @@ static int __must_check ax25_connect(struct socket *sock,
1294 err = -ERESTARTSYS; 1294 err = -ERESTARTSYS;
1295 break; 1295 break;
1296 } 1296 }
1297 finish_wait(sk->sk_sleep, &wait); 1297 finish_wait(sk_sleep(sk), &wait);
1298 1298
1299 if (err) 1299 if (err)
1300 goto out_release; 1300 goto out_release;
@@ -1346,7 +1346,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
1346 * hooked into the SABM we saved 1346 * hooked into the SABM we saved
1347 */ 1347 */
1348 for (;;) { 1348 for (;;) {
1349 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1349 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1350 skb = skb_dequeue(&sk->sk_receive_queue); 1350 skb = skb_dequeue(&sk->sk_receive_queue);
1351 if (skb) 1351 if (skb)
1352 break; 1352 break;
@@ -1364,7 +1364,7 @@ static int ax25_accept(struct socket *sock, struct socket *newsock, int flags)
1364 err = -ERESTARTSYS; 1364 err = -ERESTARTSYS;
1365 break; 1365 break;
1366 } 1366 }
1367 finish_wait(sk->sk_sleep, &wait); 1367 finish_wait(sk_sleep(sk), &wait);
1368 1368
1369 if (err) 1369 if (err)
1370 goto out; 1370 goto out;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 404a8500fd03..421c45bd1b95 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -288,7 +288,7 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
288 288
289 BT_DBG("sock %p, sk %p", sock, sk); 289 BT_DBG("sock %p, sk %p", sock, sk);
290 290
291 poll_wait(file, sk->sk_sleep, wait); 291 poll_wait(file, sk_sleep(sk), wait);
292 292
293 if (sk->sk_state == BT_LISTEN) 293 if (sk->sk_state == BT_LISTEN)
294 return bt_accept_poll(sk); 294 return bt_accept_poll(sk);
@@ -378,7 +378,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
378 378
379 BT_DBG("sk %p", sk); 379 BT_DBG("sk %p", sk);
380 380
381 add_wait_queue(sk->sk_sleep, &wait); 381 add_wait_queue(sk_sleep(sk), &wait);
382 while (sk->sk_state != state) { 382 while (sk->sk_state != state) {
383 set_current_state(TASK_INTERRUPTIBLE); 383 set_current_state(TASK_INTERRUPTIBLE);
384 384
@@ -401,7 +401,7 @@ int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo)
401 break; 401 break;
402 } 402 }
403 set_current_state(TASK_RUNNING); 403 set_current_state(TASK_RUNNING);
404 remove_wait_queue(sk->sk_sleep, &wait); 404 remove_wait_queue(sk_sleep(sk), &wait);
405 return err; 405 return err;
406} 406}
407EXPORT_SYMBOL(bt_sock_wait_state); 407EXPORT_SYMBOL(bt_sock_wait_state);
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 8062dad6d10d..f10b41fb05a0 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -474,7 +474,7 @@ static int bnep_session(void *arg)
474 set_user_nice(current, -15); 474 set_user_nice(current, -15);
475 475
476 init_waitqueue_entry(&wait, current); 476 init_waitqueue_entry(&wait, current);
477 add_wait_queue(sk->sk_sleep, &wait); 477 add_wait_queue(sk_sleep(sk), &wait);
478 while (!atomic_read(&s->killed)) { 478 while (!atomic_read(&s->killed)) {
479 set_current_state(TASK_INTERRUPTIBLE); 479 set_current_state(TASK_INTERRUPTIBLE);
480 480
@@ -496,7 +496,7 @@ static int bnep_session(void *arg)
496 schedule(); 496 schedule();
497 } 497 }
498 set_current_state(TASK_RUNNING); 498 set_current_state(TASK_RUNNING);
499 remove_wait_queue(sk->sk_sleep, &wait); 499 remove_wait_queue(sk_sleep(sk), &wait);
500 500
501 /* Cleanup session */ 501 /* Cleanup session */
502 down_write(&bnep_session_sem); 502 down_write(&bnep_session_sem);
@@ -507,7 +507,7 @@ static int bnep_session(void *arg)
507 /* Wakeup user-space polling for socket errors */ 507 /* Wakeup user-space polling for socket errors */
508 s->sock->sk->sk_err = EUNATCH; 508 s->sock->sk->sk_err = EUNATCH;
509 509
510 wake_up_interruptible(s->sock->sk->sk_sleep); 510 wake_up_interruptible(sk_sleep(s->sock->sk));
511 511
512 /* Release the socket */ 512 /* Release the socket */
513 fput(s->sock->file); 513 fput(s->sock->file);
@@ -638,7 +638,7 @@ int bnep_del_connection(struct bnep_conndel_req *req)
638 638
639 /* Kill session thread */ 639 /* Kill session thread */
640 atomic_inc(&s->killed); 640 atomic_inc(&s->killed);
641 wake_up_interruptible(s->sock->sk->sk_sleep); 641 wake_up_interruptible(sk_sleep(s->sock->sk));
642 } else 642 } else
643 err = -ENOENT; 643 err = -ENOENT;
644 644
diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c
index 5643a2391e76..0faad5ce6dc4 100644
--- a/net/bluetooth/bnep/netdev.c
+++ b/net/bluetooth/bnep/netdev.c
@@ -88,7 +88,7 @@ static void bnep_net_set_mc_list(struct net_device *dev)
88 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); 88 memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN);
89 r->len = htons(ETH_ALEN * 2); 89 r->len = htons(ETH_ALEN * 2);
90 } else { 90 } else {
91 struct dev_mc_list *dmi = dev->mc_list; 91 struct netdev_hw_addr *ha;
92 int i, len = skb->len; 92 int i, len = skb->len;
93 93
94 if (dev->flags & IFF_BROADCAST) { 94 if (dev->flags & IFF_BROADCAST) {
@@ -98,18 +98,18 @@ static void bnep_net_set_mc_list(struct net_device *dev)
98 98
99 /* FIXME: We should group addresses here. */ 99 /* FIXME: We should group addresses here. */
100 100
101 for (i = 0; 101 i = 0;
102 i < netdev_mc_count(dev) && i < BNEP_MAX_MULTICAST_FILTERS; 102 netdev_for_each_mc_addr(ha, dev) {
103 i++) { 103 if (i == BNEP_MAX_MULTICAST_FILTERS)
104 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 104 break;
105 memcpy(__skb_put(skb, ETH_ALEN), dmi->dmi_addr, ETH_ALEN); 105 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
106 dmi = dmi->next; 106 memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN);
107 } 107 }
108 r->len = htons(skb->len - len); 108 r->len = htons(skb->len - len);
109 } 109 }
110 110
111 skb_queue_tail(&sk->sk_write_queue, skb); 111 skb_queue_tail(&sk->sk_write_queue, skb);
112 wake_up_interruptible(sk->sk_sleep); 112 wake_up_interruptible(sk_sleep(sk));
113#endif 113#endif
114} 114}
115 115
@@ -193,11 +193,11 @@ static netdev_tx_t bnep_net_xmit(struct sk_buff *skb,
193 /* 193 /*
194 * We cannot send L2CAP packets from here as we are potentially in a bh. 194 * We cannot send L2CAP packets from here as we are potentially in a bh.
195 * So we have to queue them and wake up session thread which is sleeping 195 * So we have to queue them and wake up session thread which is sleeping
196 * on the sk->sk_sleep. 196 * on the sk_sleep(sk).
197 */ 197 */
198 dev->trans_start = jiffies; 198 dev->trans_start = jiffies;
199 skb_queue_tail(&sk->sk_write_queue, skb); 199 skb_queue_tail(&sk->sk_write_queue, skb);
200 wake_up_interruptible(sk->sk_sleep); 200 wake_up_interruptible(sk_sleep(sk));
201 201
202 if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { 202 if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) {
203 BT_DBG("tx queue is full"); 203 BT_DBG("tx queue is full");
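
The bnep_net_set_mc_list() change above moves from walking the old
dev->mc_list chain by hand to the netdev_for_each_mc_addr() iterator over
struct netdev_hw_addr. A minimal sketch of the new idiom, where
load_filter() and MAX_FILTERS are placeholders:

	struct netdev_hw_addr *ha;
	int n = 0;

	netdev_for_each_mc_addr(ha, dev) {
		if (n++ == MAX_FILTERS)		/* placeholder bound */
			break;
		load_filter(ha->addr);		/* hypothetical helper */
	}
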
diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h
index e4663aa14d26..785e79e953c5 100644
--- a/net/bluetooth/cmtp/cmtp.h
+++ b/net/bluetooth/cmtp/cmtp.h
@@ -125,7 +125,7 @@ static inline void cmtp_schedule(struct cmtp_session *session)
125{ 125{
126 struct sock *sk = session->sock->sk; 126 struct sock *sk = session->sock->sk;
127 127
128 wake_up_interruptible(sk->sk_sleep); 128 wake_up_interruptible(sk_sleep(sk));
129} 129}
130 130
131/* CMTP init defines */ 131/* CMTP init defines */
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 0073ec8495da..d4c6af082d48 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -284,7 +284,7 @@ static int cmtp_session(void *arg)
284 set_user_nice(current, -15); 284 set_user_nice(current, -15);
285 285
286 init_waitqueue_entry(&wait, current); 286 init_waitqueue_entry(&wait, current);
287 add_wait_queue(sk->sk_sleep, &wait); 287 add_wait_queue(sk_sleep(sk), &wait);
288 while (!atomic_read(&session->terminate)) { 288 while (!atomic_read(&session->terminate)) {
289 set_current_state(TASK_INTERRUPTIBLE); 289 set_current_state(TASK_INTERRUPTIBLE);
290 290
@@ -301,7 +301,7 @@ static int cmtp_session(void *arg)
301 schedule(); 301 schedule();
302 } 302 }
303 set_current_state(TASK_RUNNING); 303 set_current_state(TASK_RUNNING);
304 remove_wait_queue(sk->sk_sleep, &wait); 304 remove_wait_queue(sk_sleep(sk), &wait);
305 305
306 down_write(&cmtp_session_sem); 306 down_write(&cmtp_session_sem);
307 307
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 280529ad9274..bfe641b7dfaf 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -561,8 +561,8 @@ static int hidp_session(void *arg)
561 561
562 init_waitqueue_entry(&ctrl_wait, current); 562 init_waitqueue_entry(&ctrl_wait, current);
563 init_waitqueue_entry(&intr_wait, current); 563 init_waitqueue_entry(&intr_wait, current);
564 add_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); 564 add_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
565 add_wait_queue(intr_sk->sk_sleep, &intr_wait); 565 add_wait_queue(sk_sleep(intr_sk), &intr_wait);
566 while (!atomic_read(&session->terminate)) { 566 while (!atomic_read(&session->terminate)) {
567 set_current_state(TASK_INTERRUPTIBLE); 567 set_current_state(TASK_INTERRUPTIBLE);
568 568
@@ -584,8 +584,8 @@ static int hidp_session(void *arg)
584 schedule(); 584 schedule();
585 } 585 }
586 set_current_state(TASK_RUNNING); 586 set_current_state(TASK_RUNNING);
587 remove_wait_queue(intr_sk->sk_sleep, &intr_wait); 587 remove_wait_queue(sk_sleep(intr_sk), &intr_wait);
588 remove_wait_queue(ctrl_sk->sk_sleep, &ctrl_wait); 588 remove_wait_queue(sk_sleep(ctrl_sk), &ctrl_wait);
589 589
590 down_write(&hidp_session_sem); 590 down_write(&hidp_session_sem);
591 591
@@ -609,7 +609,7 @@ static int hidp_session(void *arg)
609 609
610 fput(session->intr_sock->file); 610 fput(session->intr_sock->file);
611 611
612 wait_event_timeout(*(ctrl_sk->sk_sleep), 612 wait_event_timeout(*(sk_sleep(ctrl_sk)),
613 (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500)); 613 (ctrl_sk->sk_state == BT_CLOSED), msecs_to_jiffies(500));
614 614
615 fput(session->ctrl_sock->file); 615 fput(session->ctrl_sock->file);
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index a4e215d50c10..8d934a19da0a 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -164,8 +164,8 @@ static inline void hidp_schedule(struct hidp_session *session)
164 struct sock *ctrl_sk = session->ctrl_sock->sk; 164 struct sock *ctrl_sk = session->ctrl_sock->sk;
165 struct sock *intr_sk = session->intr_sock->sk; 165 struct sock *intr_sk = session->intr_sock->sk;
166 166
167 wake_up_interruptible(ctrl_sk->sk_sleep); 167 wake_up_interruptible(sk_sleep(ctrl_sk));
168 wake_up_interruptible(intr_sk->sk_sleep); 168 wake_up_interruptible(sk_sleep(intr_sk));
169} 169}
170 170
171/* HIDP init defines */ 171/* HIDP init defines */
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c
index 99d68c34e4f1..c1e60eed5a97 100644
--- a/net/bluetooth/l2cap.c
+++ b/net/bluetooth/l2cap.c
@@ -1147,7 +1147,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
1147 BT_DBG("sk %p timeo %ld", sk, timeo); 1147 BT_DBG("sk %p timeo %ld", sk, timeo);
1148 1148
1149 /* Wait for an incoming connection. (wake-one). */ 1149 /* Wait for an incoming connection. (wake-one). */
1150 add_wait_queue_exclusive(sk->sk_sleep, &wait); 1150 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1151 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 1151 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1152 set_current_state(TASK_INTERRUPTIBLE); 1152 set_current_state(TASK_INTERRUPTIBLE);
1153 if (!timeo) { 1153 if (!timeo) {
@@ -1170,7 +1170,7 @@ static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int fl
1170 } 1170 }
1171 } 1171 }
1172 set_current_state(TASK_RUNNING); 1172 set_current_state(TASK_RUNNING);
1173 remove_wait_queue(sk->sk_sleep, &wait); 1173 remove_wait_queue(sk_sleep(sk), &wait);
1174 1174
1175 if (err) 1175 if (err)
1176 goto done; 1176 goto done;
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 8ed3c37684fa..43fbf6b4b4bf 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -503,7 +503,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
503 BT_DBG("sk %p timeo %ld", sk, timeo); 503 BT_DBG("sk %p timeo %ld", sk, timeo);
504 504
505 /* Wait for an incoming connection. (wake-one). */ 505 /* Wait for an incoming connection. (wake-one). */
506 add_wait_queue_exclusive(sk->sk_sleep, &wait); 506 add_wait_queue_exclusive(sk_sleep(sk), &wait);
507 while (!(nsk = bt_accept_dequeue(sk, newsock))) { 507 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
508 set_current_state(TASK_INTERRUPTIBLE); 508 set_current_state(TASK_INTERRUPTIBLE);
509 if (!timeo) { 509 if (!timeo) {
@@ -526,7 +526,7 @@ static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int f
526 } 526 }
527 } 527 }
528 set_current_state(TASK_RUNNING); 528 set_current_state(TASK_RUNNING);
529 remove_wait_queue(sk->sk_sleep, &wait); 529 remove_wait_queue(sk_sleep(sk), &wait);
530 530
531 if (err) 531 if (err)
532 goto done; 532 goto done;
@@ -621,7 +621,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
621{ 621{
622 DECLARE_WAITQUEUE(wait, current); 622 DECLARE_WAITQUEUE(wait, current);
623 623
624 add_wait_queue(sk->sk_sleep, &wait); 624 add_wait_queue(sk_sleep(sk), &wait);
625 for (;;) { 625 for (;;) {
626 set_current_state(TASK_INTERRUPTIBLE); 626 set_current_state(TASK_INTERRUPTIBLE);
627 627
@@ -640,7 +640,7 @@ static long rfcomm_sock_data_wait(struct sock *sk, long timeo)
640 } 640 }
641 641
642 __set_current_state(TASK_RUNNING); 642 __set_current_state(TASK_RUNNING);
643 remove_wait_queue(sk->sk_sleep, &wait); 643 remove_wait_queue(sk_sleep(sk), &wait);
644 return timeo; 644 return timeo;
645} 645}
646 646
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index ca6b2ad1c3fc..b406d3eff53a 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -567,7 +567,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
567 BT_DBG("sk %p timeo %ld", sk, timeo); 567 BT_DBG("sk %p timeo %ld", sk, timeo);
568 568
569 /* Wait for an incoming connection. (wake-one). */ 569 /* Wait for an incoming connection. (wake-one). */
570 add_wait_queue_exclusive(sk->sk_sleep, &wait); 570 add_wait_queue_exclusive(sk_sleep(sk), &wait);
571 while (!(ch = bt_accept_dequeue(sk, newsock))) { 571 while (!(ch = bt_accept_dequeue(sk, newsock))) {
572 set_current_state(TASK_INTERRUPTIBLE); 572 set_current_state(TASK_INTERRUPTIBLE);
573 if (!timeo) { 573 if (!timeo) {
@@ -590,7 +590,7 @@ static int sco_sock_accept(struct socket *sock, struct socket *newsock, int flag
590 } 590 }
591 } 591 }
592 set_current_state(TASK_RUNNING); 592 set_current_state(TASK_RUNNING);
593 remove_wait_queue(sk->sk_sleep, &wait); 593 remove_wait_queue(sk_sleep(sk), &wait);
594 594
595 if (err) 595 if (err)
596 goto done; 596 goto done;
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 90a9024e5c1e..5b8a6e73b02f 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -26,11 +26,12 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
26 const unsigned char *dest = skb->data; 26 const unsigned char *dest = skb->data;
27 struct net_bridge_fdb_entry *dst; 27 struct net_bridge_fdb_entry *dst;
28 struct net_bridge_mdb_entry *mdst; 28 struct net_bridge_mdb_entry *mdst;
29 struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
29 30
30 BR_INPUT_SKB_CB(skb)->brdev = dev; 31 brstats->tx_packets++;
32 brstats->tx_bytes += skb->len;
31 33
32 dev->stats.tx_packets++; 34 BR_INPUT_SKB_CB(skb)->brdev = dev;
33 dev->stats.tx_bytes += skb->len;
34 35
35 skb_reset_mac_header(skb); 36 skb_reset_mac_header(skb);
36 skb_pull(skb, ETH_HLEN); 37 skb_pull(skb, ETH_HLEN);
@@ -81,6 +82,31 @@ static int br_dev_stop(struct net_device *dev)
81 return 0; 82 return 0;
82} 83}
83 84
85static struct net_device_stats *br_get_stats(struct net_device *dev)
86{
87 struct net_bridge *br = netdev_priv(dev);
88 struct net_device_stats *stats = &dev->stats;
89 struct br_cpu_netstats sum = { 0 };
90 unsigned int cpu;
91
92 for_each_possible_cpu(cpu) {
93 const struct br_cpu_netstats *bstats
94 = per_cpu_ptr(br->stats, cpu);
95
96 sum.tx_bytes += bstats->tx_bytes;
97 sum.tx_packets += bstats->tx_packets;
98 sum.rx_bytes += bstats->rx_bytes;
99 sum.rx_packets += bstats->rx_packets;
100 }
101
102 stats->tx_bytes = sum.tx_bytes;
103 stats->tx_packets = sum.tx_packets;
104 stats->rx_bytes = sum.rx_bytes;
105 stats->rx_packets = sum.rx_packets;
106
107 return stats;
108}
109
84static int br_change_mtu(struct net_device *dev, int new_mtu) 110static int br_change_mtu(struct net_device *dev, int new_mtu)
85{ 111{
86 struct net_bridge *br = netdev_priv(dev); 112 struct net_bridge *br = netdev_priv(dev);
@@ -180,19 +206,28 @@ static const struct net_device_ops br_netdev_ops = {
180 .ndo_open = br_dev_open, 206 .ndo_open = br_dev_open,
181 .ndo_stop = br_dev_stop, 207 .ndo_stop = br_dev_stop,
182 .ndo_start_xmit = br_dev_xmit, 208 .ndo_start_xmit = br_dev_xmit,
209 .ndo_get_stats = br_get_stats,
183 .ndo_set_mac_address = br_set_mac_address, 210 .ndo_set_mac_address = br_set_mac_address,
184 .ndo_set_multicast_list = br_dev_set_multicast_list, 211 .ndo_set_multicast_list = br_dev_set_multicast_list,
185 .ndo_change_mtu = br_change_mtu, 212 .ndo_change_mtu = br_change_mtu,
186 .ndo_do_ioctl = br_dev_ioctl, 213 .ndo_do_ioctl = br_dev_ioctl,
187}; 214};
188 215
216static void br_dev_free(struct net_device *dev)
217{
218 struct net_bridge *br = netdev_priv(dev);
219
220 free_percpu(br->stats);
221 free_netdev(dev);
222}
223
189void br_dev_setup(struct net_device *dev) 224void br_dev_setup(struct net_device *dev)
190{ 225{
191 random_ether_addr(dev->dev_addr); 226 random_ether_addr(dev->dev_addr);
192 ether_setup(dev); 227 ether_setup(dev);
193 228
194 dev->netdev_ops = &br_netdev_ops; 229 dev->netdev_ops = &br_netdev_ops;
195 dev->destructor = free_netdev; 230 dev->destructor = br_dev_free;
196 SET_ETHTOOL_OPS(dev, &br_ethtool_ops); 231 SET_ETHTOOL_OPS(dev, &br_ethtool_ops);
197 dev->tx_queue_len = 0; 232 dev->tx_queue_len = 0;
198 dev->priv_flags = IFF_EBRIDGE; 233 dev->priv_flags = IFF_EBRIDGE;
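
The bridge now counts packets in per-CPU buckets: the hot paths bump only
the local CPU's counters through this_cpu_ptr(), so no lock or atomic is
needed, and br_get_stats() folds the buckets together on demand. A
standalone sketch of the pattern, with hypothetical names:

	struct pkt_stats { unsigned long packets, bytes; };
	struct pkt_stats __percpu *stats;	/* from alloc_percpu() */

	static void count_packet(unsigned int len)
	{
		struct pkt_stats *s = this_cpu_ptr(stats);

		s->packets++;			/* local CPU only, lock-free */
		s->bytes += len;
	}
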
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 0b6b1f2ff7ac..521439333316 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -186,6 +186,12 @@ static struct net_device *new_bridge_dev(struct net *net, const char *name)
186 br = netdev_priv(dev); 186 br = netdev_priv(dev);
187 br->dev = dev; 187 br->dev = dev;
188 188
189 br->stats = alloc_percpu(struct br_cpu_netstats);
190 if (!br->stats) {
191 free_netdev(dev);
192 return NULL;
193 }
194
189 spin_lock_init(&br->lock); 195 spin_lock_init(&br->lock);
190 INIT_LIST_HEAD(&br->port_list); 196 INIT_LIST_HEAD(&br->port_list);
191 spin_lock_init(&br->hash_lock); 197 spin_lock_init(&br->hash_lock);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index a82dde2d2ead..e7f4c1d02f57 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -24,9 +24,11 @@ const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
24static int br_pass_frame_up(struct sk_buff *skb) 24static int br_pass_frame_up(struct sk_buff *skb)
25{ 25{
26 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev; 26 struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
27 struct net_bridge *br = netdev_priv(brdev);
28 struct br_cpu_netstats *brstats = this_cpu_ptr(br->stats);
27 29
28 brdev->stats.rx_packets++; 30 brstats->rx_packets++;
29 brdev->stats.rx_bytes += skb->len; 31 brstats->rx_bytes += skb->len;
30 32
31 indev = skb->dev; 33 indev = skb->dev;
32 skb->dev = brdev; 34 skb->dev = brdev;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 386c15369d91..81bfdfe14ce5 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -29,7 +29,7 @@
29 29
30static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) 30static inline int br_ip_hash(struct net_bridge_mdb_htable *mdb, __be32 ip)
31{ 31{
32 return jhash_1word(mdb->secret, (u32)ip) & (mdb->max - 1); 32 return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1);
33} 33}
34 34
35static struct net_bridge_mdb_entry *__br_mdb_ip_get( 35static struct net_bridge_mdb_entry *__br_mdb_ip_get(
@@ -1003,8 +1003,6 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
1003 if (!pskb_may_pull(skb2, sizeof(*ih))) 1003 if (!pskb_may_pull(skb2, sizeof(*ih)))
1004 goto out; 1004 goto out;
1005 1005
1006 iph = ip_hdr(skb2);
1007
1008 switch (skb2->ip_summed) { 1006 switch (skb2->ip_summed) {
1009 case CHECKSUM_COMPLETE: 1007 case CHECKSUM_COMPLETE:
1010 if (!csum_fold(skb2->csum)) 1008 if (!csum_fold(skb2->csum))
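
The (__force u32) cast above is a sparse annotation fix: __be32 carries a
byte-order tag, and __force documents that hashing the raw bits is
intentional rather than an endianness bug. A minimal sketch:

	#include <linux/jhash.h>

	/* __force silences sparse's byte-order warning for this
	 * deliberate bit-for-bit reinterpretation. */
	static u32 hash_ip(u32 secret, __be32 ip)
	{
		return jhash_1word(secret, (__force u32)ip);
	}
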
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 763a3ec292e5..1413b72acc7f 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -82,6 +82,10 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
82 case NETDEV_UNREGISTER: 82 case NETDEV_UNREGISTER:
83 br_del_if(br, dev); 83 br_del_if(br, dev);
84 break; 84 break;
85
86 case NETDEV_PRE_TYPE_CHANGE:
87 /* Forbid underlaying device to change its type. */
88 return NOTIFY_BAD;
85 } 89 }
86 90
87 /* Events that may cause spanning tree to refresh */ 91 /* Events that may cause spanning tree to refresh */
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 846d7d1e2075..63181e4a2a67 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -130,11 +130,20 @@ struct net_bridge_port
130#endif 130#endif
131}; 131};
132 132
133struct br_cpu_netstats {
134 unsigned long rx_packets;
135 unsigned long rx_bytes;
136 unsigned long tx_packets;
137 unsigned long tx_bytes;
138};
139
133struct net_bridge 140struct net_bridge
134{ 141{
135 spinlock_t lock; 142 spinlock_t lock;
136 struct list_head port_list; 143 struct list_head port_list;
137 struct net_device *dev; 144 struct net_device *dev;
145
146 struct br_cpu_netstats __percpu *stats;
138 spinlock_t hash_lock; 147 spinlock_t hash_lock;
139 struct hlist_head hash[BR_HASH_SIZE]; 148 struct hlist_head hash[BR_HASH_SIZE];
140 unsigned long feature_mask; 149 unsigned long feature_mask;
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
new file mode 100644
index 000000000000..cd1daf6008bd
--- /dev/null
+++ b/net/caif/Kconfig
@@ -0,0 +1,48 @@
1#
2# CAIF net configurations
3#
4
5#menu "CAIF Support"
6comment "CAIF Support"
7menuconfig CAIF
8 tristate "Enable CAIF support"
9 select CRC_CCITT
10 default n
11 ---help---
12 The "Communication CPU to Application CPU Interface" (CAIF) is a packet
13 based connection-oriented MUX protocol developed by ST-Ericsson for use
14 with its modems. It is accessed from user space as sockets (PF_CAIF).
15
16 Say Y (or M) here if you build for a phone product (e.g. Android or
17	  MeeGo) that uses CAIF as transport. If unsure, say N.
18
19	  If you select to build it as a module, then CAIF_NETDEV also needs to
20	  be built as a module. You will also need to say yes to any CAIF physical
21 devices that your platform requires.
22
23 See Documentation/networking/caif for a further explanation on how to
24 use and configure CAIF.
25
26if CAIF
27
28config CAIF_DEBUG
29 bool "Enable Debug"
30 default n
31	---help---
32 Enable the inclusion of debug code in the CAIF stack.
33 Be aware that doing this will impact performance.
34 If unsure say N.
35
36
37config CAIF_NETDEV
38 tristate "CAIF GPRS Network device"
39 default CAIF
40 ---help---
41 Say Y if you will be using a CAIF based GPRS network device.
42	  This can be either built-in or a loadable module.
43	  If you select to build it as a built-in, then the core CAIF support
44	  must also be built-in.
45 If unsure say Y.
46
47endif
48#endmenu
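
With the options above, a typical modular configuration would end up with
something like the following (CRC_CCITT is pulled in by the select):

	CONFIG_CAIF=m
	CONFIG_CAIF_NETDEV=m
	CONFIG_CRC_CCITT=m
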
diff --git a/net/caif/Makefile b/net/caif/Makefile
new file mode 100644
index 000000000000..34852af2595e
--- /dev/null
+++ b/net/caif/Makefile
@@ -0,0 +1,26 @@
1ifeq ($(CONFIG_CAIF_DEBUG),1)
2CAIF_DBG_FLAGS := -DDEBUG
3endif
4
5ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
6
7caif-objs := caif_dev.o \
8 cfcnfg.o cfmuxl.o cfctrl.o \
9 cffrml.o cfveil.o cfdbgl.o\
10 cfserl.o cfdgml.o \
11 cfrfml.o cfvidl.o cfutill.o \
12 cfsrvl.o cfpkt_skbuff.o caif_config_util.o
13clean-dirs:= .tmp_versions
14
15clean-files:= \
16 Module.symvers \
17 modules.order \
18 *.cmd \
19 *.o \
20 *~
21
22obj-$(CONFIG_CAIF) += caif.o
23obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
24obj-$(CONFIG_CAIF) += caif_socket.o
25
26export-objs := caif.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
new file mode 100644
index 000000000000..6f36580366f0
--- /dev/null
+++ b/net/caif/caif_config_util.c
@@ -0,0 +1,87 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/module.h>
8#include <linux/spinlock.h>
9#include <net/caif/cfctrl.h>
10#include <net/caif/cfcnfg.h>
11#include <net/caif/caif_dev.h>
12
13int connect_req_to_link_param(struct cfcnfg *cnfg,
14 struct caif_connect_request *s,
15 struct cfctrl_link_param *l)
16{
17 struct dev_info *dev_info;
18 enum cfcnfg_phy_preference pref;
19 memset(l, 0, sizeof(*l));
20 l->priority = s->priority;
21
22 if (s->link_name[0] != '\0')
23 l->phyid = cfcnfg_get_named(cnfg, s->link_name);
24 else {
25 switch (s->link_selector) {
26 case CAIF_LINK_HIGH_BANDW:
27 pref = CFPHYPREF_HIGH_BW;
28 break;
29 case CAIF_LINK_LOW_LATENCY:
30 pref = CFPHYPREF_LOW_LAT;
31 break;
32 default:
33 return -EINVAL;
34 }
35 dev_info = cfcnfg_get_phyid(cnfg, pref);
36 if (dev_info == NULL)
37 return -ENODEV;
38 l->phyid = dev_info->id;
39 }
40 switch (s->protocol) {
41 case CAIFPROTO_AT:
42 l->linktype = CFCTRL_SRV_VEI;
43 if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN)
44 l->chtype = 0x02;
45 else
46 l->chtype = s->sockaddr.u.at.type;
47 l->endpoint = 0x00;
48 break;
49 case CAIFPROTO_DATAGRAM:
50 l->linktype = CFCTRL_SRV_DATAGRAM;
51 l->chtype = 0x00;
52 l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
53 break;
54 case CAIFPROTO_DATAGRAM_LOOP:
55 l->linktype = CFCTRL_SRV_DATAGRAM;
56 l->chtype = 0x03;
57 l->endpoint = 0x00;
58 l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
59 break;
60 case CAIFPROTO_RFM:
61 l->linktype = CFCTRL_SRV_RFM;
62 l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
63 strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
64 sizeof(l->u.rfm.volume)-1);
65 l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
66 break;
67 case CAIFPROTO_UTIL:
68 l->linktype = CFCTRL_SRV_UTIL;
69 l->endpoint = 0x00;
70 l->chtype = 0x00;
71 strncpy(l->u.utility.name, s->sockaddr.u.util.service,
72 sizeof(l->u.utility.name)-1);
73 l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
74 caif_assert(sizeof(l->u.utility.name) > 10);
75 l->u.utility.paramlen = s->param.size;
76 if (l->u.utility.paramlen > sizeof(l->u.utility.params))
77 l->u.utility.paramlen = sizeof(l->u.utility.params);
78
79 memcpy(l->u.utility.params, s->param.data,
80 l->u.utility.paramlen);
81
82 break;
83 default:
84 return -EINVAL;
85 }
86 return 0;
87}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
new file mode 100644
index 000000000000..e84837e1bc86
--- /dev/null
+++ b/net/caif/caif_dev.c
@@ -0,0 +1,413 @@
1/*
2 * CAIF Interface registration.
3 * Copyright (C) ST-Ericsson AB 2010
4 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 *
7 * Borrowed heavily from file: pn_dev.c. Thanks to
8 * Remi Denis-Courmont <remi.denis-courmont@nokia.com>
9 * and Sakari Ailus <sakari.ailus@nokia.com>
10 */
11
12#include <linux/version.h>
13#include <linux/module.h>
14#include <linux/kernel.h>
15#include <linux/if_arp.h>
16#include <linux/net.h>
17#include <linux/netdevice.h>
18#include <linux/skbuff.h>
19#include <linux/sched.h>
20#include <linux/wait.h>
21#include <net/netns/generic.h>
22#include <net/net_namespace.h>
23#include <net/pkt_sched.h>
24#include <net/caif/caif_device.h>
25#include <net/caif/caif_dev.h>
26#include <net/caif/caif_layer.h>
27#include <net/caif/cfpkt.h>
28#include <net/caif/cfcnfg.h>
29
30MODULE_LICENSE("GPL");
31#define TIMEOUT (HZ*5)
32
33/* Used for local tracking of the CAIF net devices */
34struct caif_device_entry {
35 struct cflayer layer;
36 struct list_head list;
37 atomic_t in_use;
38 atomic_t state;
39 u16 phyid;
40 struct net_device *netdev;
41 wait_queue_head_t event;
42};
43
44struct caif_device_entry_list {
45 struct list_head list;
46	/* Protects simultaneous deletes in the list */
47 spinlock_t lock;
48};
49
50struct caif_net {
51 struct caif_device_entry_list caifdevs;
52};
53
54static int caif_net_id;
55static struct cfcnfg *cfg;
56
57static struct caif_device_entry_list *caif_device_list(struct net *net)
58{
59 struct caif_net *caifn;
60 BUG_ON(!net);
61 caifn = net_generic(net, caif_net_id);
62 BUG_ON(!caifn);
63 return &caifn->caifdevs;
64}
65
66/* Allocate new CAIF device. */
67static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
68{
69 struct caif_device_entry_list *caifdevs;
70 struct caif_device_entry *caifd;
71 caifdevs = caif_device_list(dev_net(dev));
72 BUG_ON(!caifdevs);
73 caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
74 if (!caifd)
75 return NULL;
76 caifd->netdev = dev;
77 list_add(&caifd->list, &caifdevs->list);
78 init_waitqueue_head(&caifd->event);
79 return caifd;
80}
81
82static struct caif_device_entry *caif_get(struct net_device *dev)
83{
84 struct caif_device_entry_list *caifdevs =
85 caif_device_list(dev_net(dev));
86 struct caif_device_entry *caifd;
87 BUG_ON(!caifdevs);
88 list_for_each_entry(caifd, &caifdevs->list, list) {
89 if (caifd->netdev == dev)
90 return caifd;
91 }
92 return NULL;
93}
94
95static void caif_device_destroy(struct net_device *dev)
96{
97 struct caif_device_entry_list *caifdevs =
98 caif_device_list(dev_net(dev));
99 struct caif_device_entry *caifd;
100 ASSERT_RTNL();
101 if (dev->type != ARPHRD_CAIF)
102 return;
103
104 spin_lock_bh(&caifdevs->lock);
105 caifd = caif_get(dev);
106 if (caifd == NULL) {
107 spin_unlock_bh(&caifdevs->lock);
108 return;
109 }
110
111 list_del(&caifd->list);
112 spin_unlock_bh(&caifdevs->lock);
113
114 kfree(caifd);
115 return;
116}
117
118static int transmit(struct cflayer *layer, struct cfpkt *pkt)
119{
120 struct caif_device_entry *caifd =
121 container_of(layer, struct caif_device_entry, layer);
122 struct sk_buff *skb, *skb2;
123 int ret = -EINVAL;
124 skb = cfpkt_tonative(pkt);
125 skb->dev = caifd->netdev;
126 /*
127 * Don't allow SKB to be destroyed upon error, but signal resend
128 * notification to clients. We can't rely on the return value as
129	 * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes doesn't.
130 */
131 if (netif_queue_stopped(caifd->netdev))
132 return -EAGAIN;
133 skb2 = skb_get(skb);
134
135 ret = dev_queue_xmit(skb2);
136
137 if (!ret)
138 kfree_skb(skb);
139 else
140 return -EAGAIN;
141
142 return 0;
143}
144
145static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
146{
147 struct caif_device_entry *caifd;
148 struct caif_dev_common *caifdev;
149 caifd = container_of(layr, struct caif_device_entry, layer);
150 caifdev = netdev_priv(caifd->netdev);
151 if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
152 atomic_set(&caifd->in_use, 1);
153 wake_up_interruptible(&caifd->event);
154
155 } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
156 atomic_set(&caifd->in_use, 0);
157 wake_up_interruptible(&caifd->event);
158 }
159 return 0;
160}
161
162/*
163	 * Stuff received packets into the associated sockets.
164 * On error, returns non-zero and releases the skb.
165 */
166static int receive(struct sk_buff *skb, struct net_device *dev,
167 struct packet_type *pkttype, struct net_device *orig_dev)
168{
169 struct net *net;
170 struct cfpkt *pkt;
171 struct caif_device_entry *caifd;
172 net = dev_net(dev);
173 pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
174 caifd = caif_get(dev);
175 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
176 return NET_RX_DROP;
177
178 if (caifd->layer.up->receive(caifd->layer.up, pkt))
179 return NET_RX_DROP;
180
181 return 0;
182}
183
184static struct packet_type caif_packet_type __read_mostly = {
185 .type = cpu_to_be16(ETH_P_CAIF),
186 .func = receive,
187};
188
189static void dev_flowctrl(struct net_device *dev, int on)
190{
191 struct caif_device_entry *caifd = caif_get(dev);
192 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
193 return;
194
195 caifd->layer.up->ctrlcmd(caifd->layer.up,
196 on ?
197 _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
198 _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
199 caifd->layer.id);
200}
201
202/* notify Caif of device events */
203static int caif_device_notify(struct notifier_block *me, unsigned long what,
204 void *arg)
205{
206 struct net_device *dev = arg;
207 struct caif_device_entry *caifd = NULL;
208 struct caif_dev_common *caifdev;
209 enum cfcnfg_phy_preference pref;
210 int res = -EINVAL;
211 enum cfcnfg_phy_type phy_type;
212
213 if (dev->type != ARPHRD_CAIF)
214 return 0;
215
216 switch (what) {
217 case NETDEV_REGISTER:
218 pr_info("CAIF: %s():register %s\n", __func__, dev->name);
219 caifd = caif_device_alloc(dev);
220 if (caifd == NULL)
221 break;
222 caifdev = netdev_priv(dev);
223 caifdev->flowctrl = dev_flowctrl;
224 atomic_set(&caifd->state, what);
225 res = 0;
226 break;
227
228 case NETDEV_UP:
229 pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
230 caifd = caif_get(dev);
231 if (caifd == NULL)
232 break;
233 caifdev = netdev_priv(dev);
234 if (atomic_read(&caifd->state) == NETDEV_UP) {
235 pr_info("CAIF: %s():%s already up\n",
236 __func__, dev->name);
237 break;
238 }
239 atomic_set(&caifd->state, what);
240 caifd->layer.transmit = transmit;
241 caifd->layer.modemcmd = modemcmd;
242
243 if (caifdev->use_frag)
244 phy_type = CFPHYTYPE_FRAG;
245 else
246 phy_type = CFPHYTYPE_CAIF;
247
248 switch (caifdev->link_select) {
249 case CAIF_LINK_HIGH_BANDW:
250 pref = CFPHYPREF_LOW_LAT;
251 break;
252 case CAIF_LINK_LOW_LATENCY:
253 pref = CFPHYPREF_HIGH_BW;
254 break;
255 default:
256 pref = CFPHYPREF_HIGH_BW;
257 break;
258 }
259
260 cfcnfg_add_phy_layer(get_caif_conf(),
261 phy_type,
262 dev,
263 &caifd->layer,
264 &caifd->phyid,
265 pref,
266 caifdev->use_fcs,
267 caifdev->use_stx);
268 strncpy(caifd->layer.name, dev->name,
269 sizeof(caifd->layer.name) - 1);
270 caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
271 break;
272
273 case NETDEV_GOING_DOWN:
274 caifd = caif_get(dev);
275 if (caifd == NULL)
276 break;
277 pr_info("CAIF: %s():going down %s\n", __func__, dev->name);
278
279 if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
280 atomic_read(&caifd->state) == NETDEV_DOWN)
281 break;
282
283 atomic_set(&caifd->state, what);
284 if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
285 return -EINVAL;
286 caifd->layer.up->ctrlcmd(caifd->layer.up,
287 _CAIF_CTRLCMD_PHYIF_DOWN_IND,
288 caifd->layer.id);
289 res = wait_event_interruptible_timeout(caifd->event,
290 atomic_read(&caifd->in_use) == 0,
291 TIMEOUT);
292 break;
293
294 case NETDEV_DOWN:
295 caifd = caif_get(dev);
296 if (caifd == NULL)
297 break;
298 pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
299 if (atomic_read(&caifd->in_use))
300 pr_warning("CAIF: %s(): "
301 "Unregistering an active CAIF device: %s\n",
302 __func__, dev->name);
303 cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
304 atomic_set(&caifd->state, what);
305 break;
306
307 case NETDEV_UNREGISTER:
308 caifd = caif_get(dev);
309 pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
310 atomic_set(&caifd->state, what);
311 caif_device_destroy(dev);
312 break;
313 }
314 return 0;
315}
316
317static struct notifier_block caif_device_notifier = {
318 .notifier_call = caif_device_notify,
319 .priority = 0,
320};
321
322
323struct cfcnfg *get_caif_conf(void)
324{
325 return cfg;
326}
327EXPORT_SYMBOL(get_caif_conf);
328
329int caif_connect_client(struct caif_connect_request *conn_req,
330 struct cflayer *client_layer)
331{
332 struct cfctrl_link_param param;
333 if (connect_req_to_link_param(get_caif_conf(), conn_req, &param) == 0)
334 /* Hook up the adaptation layer. */
335 return cfcnfg_add_adaptation_layer(get_caif_conf(),
336 &param, client_layer);
337
338 return -EINVAL;
339
340 caif_assert(0);
341}
342EXPORT_SYMBOL(caif_connect_client);
343
344int caif_disconnect_client(struct cflayer *adap_layer)
345{
346 return cfcnfg_del_adapt_layer(get_caif_conf(), adap_layer);
347}
348EXPORT_SYMBOL(caif_disconnect_client);
349
350/* Per-namespace Caif devices handling */
351static int caif_init_net(struct net *net)
352{
353 struct caif_net *caifn = net_generic(net, caif_net_id);
354 INIT_LIST_HEAD(&caifn->caifdevs.list);
355 spin_lock_init(&caifn->caifdevs.lock);
356 return 0;
357}
358
359static void caif_exit_net(struct net *net)
360{
361 struct net_device *dev;
362 int res;
363 rtnl_lock();
364 for_each_netdev(net, dev) {
365 if (dev->type != ARPHRD_CAIF)
366 continue;
367 res = dev_close(dev);
368 caif_device_destroy(dev);
369 }
370 rtnl_unlock();
371}
372
373static struct pernet_operations caif_net_ops = {
374 .init = caif_init_net,
375 .exit = caif_exit_net,
376 .id = &caif_net_id,
377 .size = sizeof(struct caif_net),
378};
379
380/* Initialize Caif devices list */
381static int __init caif_device_init(void)
382{
383 int result;
384 cfg = cfcnfg_create();
385 if (!cfg) {
386 pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
387 goto err_cfcnfg_create_failed;
388 }
389 result = register_pernet_device(&caif_net_ops);
390
391 if (result) {
392 kfree(cfg);
393 cfg = NULL;
394 return result;
395 }
396 dev_add_pack(&caif_packet_type);
397 register_netdevice_notifier(&caif_device_notifier);
398
399 return result;
400err_cfcnfg_create_failed:
401 return -ENODEV;
402}
403
404static void __exit caif_device_exit(void)
405{
406 dev_remove_pack(&caif_packet_type);
407 unregister_pernet_device(&caif_net_ops);
408 unregister_netdevice_notifier(&caif_device_notifier);
409 cfcnfg_remove(cfg);
410}
411
412module_init(caif_device_init);
413module_exit(caif_device_exit);
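
caif_dev.c keeps its device list per network namespace by registering
pernet_operations with an .id and .size, letting the core allocate the
per-namespace storage that net_generic() later looks up. A minimal sketch
of that pattern, with hypothetical names:

	static int my_net_id;

	struct my_net {
		struct list_head devs;
	};

	static int __net_init my_init_net(struct net *net)
	{
		struct my_net *mn = net_generic(net, my_net_id);

		INIT_LIST_HEAD(&mn->devs);	/* storage allocated by core */
		return 0;
	}

	static struct pernet_operations my_net_ops = {
		.init = my_init_net,
		.id   = &my_net_id,
		.size = sizeof(struct my_net),
	};
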
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
new file mode 100644
index 000000000000..90317e7d10b4
--- /dev/null
+++ b/net/caif/caif_socket.c
@@ -0,0 +1,1391 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland sjur.brandeland@stericsson.com
4 * Per Sigmond per.sigmond@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#include <linux/fs.h>
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/sched.h>
12#include <linux/spinlock.h>
13#include <linux/mutex.h>
14#include <linux/list.h>
15#include <linux/wait.h>
16#include <linux/poll.h>
17#include <linux/tcp.h>
18#include <linux/uaccess.h>
19#include <asm/atomic.h>
20
21#include <linux/caif/caif_socket.h>
22#include <net/caif/caif_layer.h>
23#include <net/caif/caif_dev.h>
24#include <net/caif/cfpkt.h>
25
26MODULE_LICENSE("GPL");
27
28#define CHNL_SKT_READ_QUEUE_HIGH 200
29#define CHNL_SKT_READ_QUEUE_LOW 100
30
31static int caif_sockbuf_size = 40000;
32static atomic_t caif_nr_socks = ATOMIC_INIT(0);
33
34#define CONN_STATE_OPEN_BIT 1
35#define CONN_STATE_PENDING_BIT 2
36#define CONN_STATE_PEND_DESTROY_BIT 3
37#define CONN_REMOTE_SHUTDOWN_BIT 4
38
39#define TX_FLOW_ON_BIT 1
40#define RX_FLOW_ON_BIT 2
41
42#define STATE_IS_OPEN(cf_sk) test_bit(CONN_STATE_OPEN_BIT,\
43 (void *) &(cf_sk)->conn_state)
44#define STATE_IS_REMOTE_SHUTDOWN(cf_sk) test_bit(CONN_REMOTE_SHUTDOWN_BIT,\
45 (void *) &(cf_sk)->conn_state)
46#define STATE_IS_PENDING(cf_sk) test_bit(CONN_STATE_PENDING_BIT,\
47 (void *) &(cf_sk)->conn_state)
48#define STATE_IS_PENDING_DESTROY(cf_sk) test_bit(CONN_STATE_PEND_DESTROY_BIT,\
49 (void *) &(cf_sk)->conn_state)
50
51#define SET_STATE_PENDING_DESTROY(cf_sk) set_bit(CONN_STATE_PEND_DESTROY_BIT,\
52 (void *) &(cf_sk)->conn_state)
53#define SET_STATE_OPEN(cf_sk) set_bit(CONN_STATE_OPEN_BIT,\
54 (void *) &(cf_sk)->conn_state)
55#define SET_STATE_CLOSED(cf_sk) clear_bit(CONN_STATE_OPEN_BIT,\
56 (void *) &(cf_sk)->conn_state)
57#define SET_PENDING_ON(cf_sk) set_bit(CONN_STATE_PENDING_BIT,\
58 (void *) &(cf_sk)->conn_state)
59#define SET_PENDING_OFF(cf_sk) clear_bit(CONN_STATE_PENDING_BIT,\
60 (void *) &(cf_sk)->conn_state)
61#define SET_REMOTE_SHUTDOWN(cf_sk) set_bit(CONN_REMOTE_SHUTDOWN_BIT,\
62 (void *) &(cf_sk)->conn_state)
63
64#define SET_REMOTE_SHUTDOWN_OFF(dev) clear_bit(CONN_REMOTE_SHUTDOWN_BIT,\
65 (void *) &(dev)->conn_state)
66#define RX_FLOW_IS_ON(cf_sk) test_bit(RX_FLOW_ON_BIT,\
67 (void *) &(cf_sk)->flow_state)
68#define TX_FLOW_IS_ON(cf_sk) test_bit(TX_FLOW_ON_BIT,\
69 (void *) &(cf_sk)->flow_state)
70
71#define SET_RX_FLOW_OFF(cf_sk) clear_bit(RX_FLOW_ON_BIT,\
72 (void *) &(cf_sk)->flow_state)
73#define SET_RX_FLOW_ON(cf_sk) set_bit(RX_FLOW_ON_BIT,\
74 (void *) &(cf_sk)->flow_state)
75#define SET_TX_FLOW_OFF(cf_sk) clear_bit(TX_FLOW_ON_BIT,\
76 (void *) &(cf_sk)->flow_state)
77#define SET_TX_FLOW_ON(cf_sk) set_bit(TX_FLOW_ON_BIT,\
78 (void *) &(cf_sk)->flow_state)
79
80#define SKT_READ_FLAG 0x01
81#define SKT_WRITE_FLAG 0x02
 82#include <linux/debugfs.h>
 83static struct dentry *debugfsdir;
84
85#ifdef CONFIG_DEBUG_FS
86struct debug_fs_counter {
87 atomic_t num_open;
88 atomic_t num_close;
89 atomic_t num_init;
90 atomic_t num_init_resp;
91 atomic_t num_init_fail_resp;
92 atomic_t num_deinit;
93 atomic_t num_deinit_resp;
94 atomic_t num_remote_shutdown_ind;
95 atomic_t num_tx_flow_off_ind;
96 atomic_t num_tx_flow_on_ind;
97 atomic_t num_rx_flow_off;
98 atomic_t num_rx_flow_on;
99 atomic_t skb_in_use;
100 atomic_t skb_alloc;
101 atomic_t skb_free;
102};
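/*
 * The counters above are exported read/write as u32 debugfs files in
 * <debugfs>/chnl_skt/ (typically mounted at /sys/kernel/debug); see
 * caif_sktinit_module() below.
 */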
103static struct debug_fs_counter cnt;
104#define dbfs_atomic_inc(v) atomic_inc(v)
105#define dbfs_atomic_dec(v) atomic_dec(v)
106#else
107#define dbfs_atomic_inc(v)
108#define dbfs_atomic_dec(v)
109#endif
110
111/* The AF_CAIF socket */
112struct caifsock {
113 /* NOTE: sk has to be the first member */
114 struct sock sk;
115 struct cflayer layer;
116 char name[CAIF_LAYER_NAME_SZ];
117 u32 conn_state;
118 u32 flow_state;
119 struct cfpktq *pktq;
120 int file_mode;
121 struct caif_connect_request conn_req;
122 int read_queue_len;
123 /* protect updates of read_queue_len */
124 spinlock_t read_queue_len_lock;
125 struct dentry *debugfs_socket_dir;
126};
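/*
 * Minimal user-space usage sketch. Illustrative only; the address
 * layout follows struct sockaddr_caif in linux/caif/caif_socket.h,
 * and CAIF_ATTYPE_PLAIN is assumed to be the plain AT-channel type
 * defined there:
 *
 *	int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);
 *	struct sockaddr_caif addr = {
 *		.family = AF_CAIF,
 *		.u.at.type = CAIF_ATTYPE_PLAIN,
 *	};
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 */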
127
128static void drain_queue(struct caifsock *cf_sk);
129
130/* Packet Receive Callback function called from CAIF Stack */
131static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
132{
133 struct caifsock *cf_sk;
134 int read_queue_high;
135 cf_sk = container_of(layr, struct caifsock, layer);
136
137 if (!STATE_IS_OPEN(cf_sk)) {
138		/* FIXME: This should be allowed finally! */
139 pr_debug("CAIF: %s(): called after close request\n", __func__);
140 cfpkt_destroy(pkt);
141 return 0;
142 }
143 /* NOTE: This function may be called in Tasklet context! */
144
145 /* The queue has its own lock */
146 cfpkt_queue(cf_sk->pktq, pkt, 0);
147
148 spin_lock(&cf_sk->read_queue_len_lock);
149 cf_sk->read_queue_len++;
150
151 read_queue_high = (cf_sk->read_queue_len > CHNL_SKT_READ_QUEUE_HIGH);
152 spin_unlock(&cf_sk->read_queue_len_lock);
153
154 if (RX_FLOW_IS_ON(cf_sk) && read_queue_high) {
155 dbfs_atomic_inc(&cnt.num_rx_flow_off);
156 SET_RX_FLOW_OFF(cf_sk);
157
158 /* Send flow off (NOTE: must not sleep) */
159 pr_debug("CAIF: %s():"
160 " sending flow OFF (queue len = %d)\n",
161 __func__,
162 cf_sk->read_queue_len);
163 caif_assert(cf_sk->layer.dn);
164		caif_assert(cf_sk->layer.dn->modemcmd);
165
166 (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
167 CAIF_MODEMCMD_FLOW_OFF_REQ);
168 }
169
170 /* Signal reader that data is available. */
171
172 wake_up_interruptible(cf_sk->sk.sk_sleep);
173
174 return 0;
175}
176
177/* Packet Flow Control Callback function called from CAIF */
178static void caif_sktflowctrl_cb(struct cflayer *layr,
179 enum caif_ctrlcmd flow,
180 int phyid)
181{
182 struct caifsock *cf_sk;
183
184 /* NOTE: This function may be called in Tasklet context! */
185 pr_debug("CAIF: %s(): flowctrl func called: %s.\n",
186 __func__,
187 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
188 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
189 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT_RSP" :
190 flow == CAIF_CTRLCMD_DEINIT_RSP ? "DEINIT_RSP" :
191 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "INIT_FAIL_RSP" :
192		 flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?
193			"REMOTE_SHUTDOWN" :
194			"UNKNOWN CTRL COMMAND");
195
196 if (layr == NULL)
197 return;
198
199 cf_sk = container_of(layr, struct caifsock, layer);
200
201 switch (flow) {
202 case CAIF_CTRLCMD_FLOW_ON_IND:
203 dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
204 /* Signal reader that data is available. */
205 SET_TX_FLOW_ON(cf_sk);
206 wake_up_interruptible(cf_sk->sk.sk_sleep);
207 break;
208
209 case CAIF_CTRLCMD_FLOW_OFF_IND:
210 dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
211 SET_TX_FLOW_OFF(cf_sk);
212 break;
213
214 case CAIF_CTRLCMD_INIT_RSP:
215 dbfs_atomic_inc(&cnt.num_init_resp);
216 /* Signal reader that data is available. */
217 caif_assert(STATE_IS_OPEN(cf_sk));
218 SET_PENDING_OFF(cf_sk);
219 SET_TX_FLOW_ON(cf_sk);
220 wake_up_interruptible(cf_sk->sk.sk_sleep);
221 break;
222
223 case CAIF_CTRLCMD_DEINIT_RSP:
224 dbfs_atomic_inc(&cnt.num_deinit_resp);
225 caif_assert(!STATE_IS_OPEN(cf_sk));
226 SET_PENDING_OFF(cf_sk);
227 if (!STATE_IS_PENDING_DESTROY(cf_sk)) {
228 if (cf_sk->sk.sk_sleep != NULL)
229 wake_up_interruptible(cf_sk->sk.sk_sleep);
230 }
231 dbfs_atomic_inc(&cnt.num_deinit);
232 sock_put(&cf_sk->sk);
233 break;
234
235 case CAIF_CTRLCMD_INIT_FAIL_RSP:
236 dbfs_atomic_inc(&cnt.num_init_fail_resp);
237 caif_assert(STATE_IS_OPEN(cf_sk));
238 SET_STATE_CLOSED(cf_sk);
239 SET_PENDING_OFF(cf_sk);
240 SET_TX_FLOW_OFF(cf_sk);
241 wake_up_interruptible(cf_sk->sk.sk_sleep);
242 break;
243
244 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
245 dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
246 SET_REMOTE_SHUTDOWN(cf_sk);
247 /* Use sk_shutdown to indicate remote shutdown indication */
248 cf_sk->sk.sk_shutdown |= RCV_SHUTDOWN;
249 cf_sk->file_mode = 0;
250 wake_up_interruptible(cf_sk->sk.sk_sleep);
251 break;
252
253 default:
254 pr_debug("CAIF: %s(): Unexpected flow command %d\n",
255 __func__, flow);
256 }
257}
258
259static void skb_destructor(struct sk_buff *skb)
260{
261 dbfs_atomic_inc(&cnt.skb_free);
262 dbfs_atomic_dec(&cnt.skb_in_use);
263}
264
265
266static int caif_recvmsg(struct kiocb *iocb, struct socket *sock,
267 struct msghdr *m, size_t buf_len, int flags)
268
269{
270 struct sock *sk = sock->sk;
271 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
272 struct cfpkt *pkt = NULL;
273 size_t len;
274 int result;
275 struct sk_buff *skb;
276 ssize_t ret = -EIO;
277 int read_queue_low;
278
279 if (cf_sk == NULL) {
280 pr_debug("CAIF: %s(): private_data not set!\n",
281 __func__);
282 ret = -EBADFD;
283 goto read_error;
284 }
285
286 /* Don't do multiple iovec entries yet */
287 if (m->msg_iovlen != 1)
288 return -EOPNOTSUPP;
289
290 if (unlikely(!buf_len))
291 return -EINVAL;
292
293 lock_sock(&(cf_sk->sk));
294
295 caif_assert(cf_sk->pktq);
296
297 if (!STATE_IS_OPEN(cf_sk)) {
298 /* Socket is closed or closing. */
299 if (!STATE_IS_PENDING(cf_sk)) {
300 pr_debug("CAIF: %s(): socket is closed (by remote)\n",
301 __func__);
302 ret = -EPIPE;
303 } else {
304 pr_debug("CAIF: %s(): socket is closing..\n", __func__);
305 ret = -EBADF;
306 }
307 goto read_error;
308 }
309 /* Socket is open or opening. */
310 if (STATE_IS_PENDING(cf_sk)) {
311 pr_debug("CAIF: %s(): socket is opening...\n", __func__);
312
313 if (flags & MSG_DONTWAIT) {
314 /* We can't block. */
315 pr_debug("CAIF: %s():state pending and MSG_DONTWAIT\n",
316 __func__);
317 ret = -EAGAIN;
318 goto read_error;
319 }
320
321 /*
322 * Blocking mode; state is pending and we need to wait
323 * for its conclusion.
324 */
325 release_sock(&cf_sk->sk);
326
327 result =
328 wait_event_interruptible(*cf_sk->sk.sk_sleep,
329 !STATE_IS_PENDING(cf_sk));
330
331 lock_sock(&(cf_sk->sk));
332
333 if (result == -ERESTARTSYS) {
334 pr_debug("CAIF: %s(): wait_event_interruptible"
335 " woken by a signal (1)", __func__);
336 ret = -ERESTARTSYS;
337 goto read_error;
338 }
339 }
340
341 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) ||
342 !STATE_IS_OPEN(cf_sk) ||
343 STATE_IS_PENDING(cf_sk)) {
344
345 pr_debug("CAIF: %s(): socket closed\n",
346 __func__);
347 ret = -ESHUTDOWN;
348 goto read_error;
349 }
350
351 /*
352 * Block if we don't have any received buffers.
353 * The queue has its own lock.
354 */
355 while ((pkt = cfpkt_qpeek(cf_sk->pktq)) == NULL) {
356
357 if (flags & MSG_DONTWAIT) {
358 pr_debug("CAIF: %s(): MSG_DONTWAIT\n", __func__);
359 ret = -EAGAIN;
360 goto read_error;
361 }
362 trace_printk("CAIF: %s() wait_event\n", __func__);
363
364 /* Let writers in. */
365 release_sock(&cf_sk->sk);
366
367 /* Block reader until data arrives or socket is closed. */
368 if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
369 cfpkt_qpeek(cf_sk->pktq)
370 || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
371 || !STATE_IS_OPEN(cf_sk)) ==
372 -ERESTARTSYS) {
373 pr_debug("CAIF: %s():"
374 " wait_event_interruptible woken by "
375 "a signal, signal_pending(current) = %d\n",
376 __func__,
377 signal_pending(current));
378 return -ERESTARTSYS;
379 }
380
381 trace_printk("CAIF: %s() awake\n", __func__);
382 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
383 pr_debug("CAIF: %s(): "
384 "received remote_shutdown indication\n",
385 __func__);
386 ret = -ESHUTDOWN;
387 goto read_error_no_unlock;
388 }
389
390 /* I want to be alone on cf_sk (except status and queue). */
391 lock_sock(&(cf_sk->sk));
392
393 if (!STATE_IS_OPEN(cf_sk)) {
394 /* Someone closed the link, report error. */
395 pr_debug("CAIF: %s(): remote end shutdown!\n",
396 __func__);
397 ret = -EPIPE;
398 goto read_error;
399 }
400 }
401
402 /* The queue has its own lock. */
403 len = cfpkt_getlen(pkt);
404
405 /* Check max length that can be copied. */
406 if (len <= buf_len)
407 pkt = cfpkt_dequeue(cf_sk->pktq);
408 else {
409 pr_debug("CAIF: %s(): user buffer too small (%ld,%ld)\n",
410 __func__, (long) len, (long) buf_len);
411 if (sock->type == SOCK_SEQPACKET) {
412 ret = -EMSGSIZE;
413 goto read_error;
414 }
415 len = buf_len;
416 }
417
418
419 spin_lock(&cf_sk->read_queue_len_lock);
420 cf_sk->read_queue_len--;
421 read_queue_low = (cf_sk->read_queue_len < CHNL_SKT_READ_QUEUE_LOW);
422 spin_unlock(&cf_sk->read_queue_len_lock);
423
424 if (!RX_FLOW_IS_ON(cf_sk) && read_queue_low) {
425 dbfs_atomic_inc(&cnt.num_rx_flow_on);
426 SET_RX_FLOW_ON(cf_sk);
427
428 /* Send flow on. */
429 pr_debug("CAIF: %s(): sending flow ON (queue len = %d)\n",
430 __func__, cf_sk->read_queue_len);
431 caif_assert(cf_sk->layer.dn);
432		caif_assert(cf_sk->layer.dn->modemcmd);
433 (void) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
434 CAIF_MODEMCMD_FLOW_ON_REQ);
435
436 caif_assert(cf_sk->read_queue_len >= 0);
437 }
438
439 skb = cfpkt_tonative(pkt);
440 result = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
441 skb_pull(skb, len);
442
443 if (result) {
444 pr_debug("CAIF: %s(): copy to_iovec failed\n", __func__);
445 cfpkt_destroy(pkt);
446 ret = -EFAULT;
447 goto read_error;
448 }
449
450 /* Free packet and remove from queue */
451 if (skb->len == 0)
452 skb_free_datagram(sk, skb);
453
454 /* Let the others in. */
455 release_sock(&cf_sk->sk);
456 return len;
457
458read_error:
459 release_sock(&cf_sk->sk);
460read_error_no_unlock:
461 return ret;
462}
463
464/* Transmit a message as a consequence of sendmsg() or sendto(). */
465static int caif_sendmsg(struct kiocb *kiocb, struct socket *sock,
466 struct msghdr *msg, size_t len)
467{
468
469 struct sock *sk = sock->sk;
470 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
471 size_t payload_size = msg->msg_iov->iov_len;
472 struct cfpkt *pkt = NULL;
473 struct caif_payload_info info;
474 unsigned char *txbuf;
475 ssize_t ret = -EIO;
476 int result;
477 struct sk_buff *skb;
478	/* Don't do multiple iovec entries yet */
	if (msg->msg_iovlen != 1)
		return -EOPNOTSUPP;
479
480 if (cf_sk == NULL) {
481 pr_debug("CAIF: %s(): private_data not set!\n",
482 __func__);
483 ret = -EBADFD;
484 goto write_error_no_unlock;
485 }
486
487 if (unlikely(msg->msg_iov->iov_base == NULL)) {
488 pr_warning("CAIF: %s(): Buffer is NULL.\n", __func__);
489 ret = -EINVAL;
490 goto write_error_no_unlock;
491 }
492
493 if (payload_size > CAIF_MAX_PAYLOAD_SIZE) {
494 pr_debug("CAIF: %s(): buffer too long\n", __func__);
495 if (sock->type == SOCK_SEQPACKET) {
496 ret = -EINVAL;
497 goto write_error_no_unlock;
498 }
499 payload_size = CAIF_MAX_PAYLOAD_SIZE;
500 }
501
502 /* I want to be alone on cf_sk (except status and queue) */
503 lock_sock(&(cf_sk->sk));
504
505 caif_assert(cf_sk->pktq);
506
507 if (!STATE_IS_OPEN(cf_sk)) {
508 /* Socket is closed or closing */
509 if (!STATE_IS_PENDING(cf_sk)) {
510 pr_debug("CAIF: %s(): socket is closed (by remote)\n",
511 __func__);
512 ret = -EPIPE;
513 } else {
514 pr_debug("CAIF: %s(): socket is closing...\n",
515 __func__);
516 ret = -EBADF;
517 }
518 goto write_error;
519 }
520
521 /* Socket is open or opening */
522 if (STATE_IS_PENDING(cf_sk)) {
523 pr_debug("CAIF: %s(): socket is opening...\n", __func__);
524
525 if (msg->msg_flags & MSG_DONTWAIT) {
526 /* We can't block */
527 trace_printk("CAIF: %s():state pending:"
528 "state=MSG_DONTWAIT\n", __func__);
529 ret = -EAGAIN;
530 goto write_error;
531 }
532 /* Let readers in */
533 release_sock(&cf_sk->sk);
534
535 /*
536 * Blocking mode; state is pending and we need to wait
537 * for its conclusion.
538 */
539 result =
540 wait_event_interruptible(*cf_sk->sk.sk_sleep,
541 !STATE_IS_PENDING(cf_sk));
542 /* I want to be alone on cf_sk (except status and queue) */
543 lock_sock(&(cf_sk->sk));
544
545 if (result == -ERESTARTSYS) {
546 pr_debug("CAIF: %s(): wait_event_interruptible"
547 " woken by a signal (1)", __func__);
548 ret = -ERESTARTSYS;
549 goto write_error;
550 }
551 }
552 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk) ||
553 !STATE_IS_OPEN(cf_sk) ||
554 STATE_IS_PENDING(cf_sk)) {
555
556 pr_debug("CAIF: %s(): socket closed\n",
557 __func__);
558 ret = -ESHUTDOWN;
559 goto write_error;
560 }
561
562 if (!TX_FLOW_IS_ON(cf_sk)) {
563
564 /* Flow is off. Check non-block flag */
565 if (msg->msg_flags & MSG_DONTWAIT) {
566 trace_printk("CAIF: %s(): MSG_DONTWAIT and tx flow off",
567 __func__);
568 ret = -EAGAIN;
569 goto write_error;
570 }
571
572 /* release lock before waiting */
573 release_sock(&cf_sk->sk);
574
575 /* Wait until flow is on or socket is closed */
576 if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
577 TX_FLOW_IS_ON(cf_sk)
578 || !STATE_IS_OPEN(cf_sk)
579 || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
580 ) == -ERESTARTSYS) {
581 pr_debug("CAIF: %s():"
582 " wait_event_interruptible woken by a signal",
583 __func__);
584 ret = -ERESTARTSYS;
585 goto write_error_no_unlock;
586 }
587
588 /* I want to be alone on cf_sk (except status and queue) */
589 lock_sock(&(cf_sk->sk));
590
591 if (!STATE_IS_OPEN(cf_sk)) {
592 /* someone closed the link, report error */
593 pr_debug("CAIF: %s(): remote end shutdown!\n",
594 __func__);
595 ret = -EPIPE;
596 goto write_error;
597 }
598
599 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
600 pr_debug("CAIF: %s(): "
601 "received remote_shutdown indication\n",
602 __func__);
603 ret = -ESHUTDOWN;
604 goto write_error;
605 }
606 }
607
608	pkt = cfpkt_create(payload_size);
	if (!pkt) {
		/* cfpkt_create() returns NULL on allocation failure */
		ret = -ENOMEM;
		goto write_error;
	}
609	skb = (struct sk_buff *)pkt;
610	skb->destructor = skb_destructor;
611	skb->sk = sk;
612	dbfs_atomic_inc(&cnt.skb_alloc);
613	dbfs_atomic_inc(&cnt.skb_in_use);
614 if (cfpkt_raw_append(pkt, (void **) &txbuf, payload_size) < 0) {
615 pr_debug("CAIF: %s(): cfpkt_raw_append failed\n", __func__);
616 cfpkt_destroy(pkt);
617 ret = -EINVAL;
618 goto write_error;
619 }
620
621 /* Copy data into buffer. */
622 if (copy_from_user(txbuf, msg->msg_iov->iov_base, payload_size)) {
623		pr_debug("CAIF: %s(): copy_from_user returned non-zero\n",
624			__func__);
625		cfpkt_destroy(pkt);
626		ret = -EFAULT;
627 goto write_error;
628 }
629 memset(&info, 0, sizeof(info));
630
631 /* Send the packet down the stack. */
632 caif_assert(cf_sk->layer.dn);
633 caif_assert(cf_sk->layer.dn->transmit);
634
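	/*
	 * Transmit loop: retry as long as the layer below returns
	 * -EAGAIN. Unless MSG_DONTWAIT is set, sleep until TX flow is
	 * switched back on (or the connection goes down) and try again.
	 */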
635 do {
636 ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
637
638		if (likely(ret != -EAGAIN))
639 break;
640
641 /* EAGAIN - retry */
642 if (msg->msg_flags & MSG_DONTWAIT) {
643 pr_debug("CAIF: %s(): NONBLOCK and transmit failed,"
644 " error = %ld\n", __func__, (long) ret);
645 ret = -EAGAIN;
646 goto write_error;
647 }
648
649 /* Let readers in */
650 release_sock(&cf_sk->sk);
651
652 /* Wait until flow is on or socket is closed */
653 if (wait_event_interruptible(*cf_sk->sk.sk_sleep,
654 TX_FLOW_IS_ON(cf_sk)
655 || !STATE_IS_OPEN(cf_sk)
656 || STATE_IS_REMOTE_SHUTDOWN(cf_sk)
657 ) == -ERESTARTSYS) {
658 pr_debug("CAIF: %s(): wait_event_interruptible"
659 " woken by a signal", __func__);
660 ret = -ERESTARTSYS;
661 goto write_error_no_unlock;
662 }
663
664 /* I want to be alone on cf_sk (except status and queue) */
665 lock_sock(&(cf_sk->sk));
666
667 } while (ret == -EAGAIN);
668
669 if (ret < 0) {
670 cfpkt_destroy(pkt);
671 pr_debug("CAIF: %s(): transmit failed, error = %ld\n",
672 __func__, (long) ret);
673
674 goto write_error;
675 }
676
677 release_sock(&cf_sk->sk);
678 return payload_size;
679
680write_error:
681 release_sock(&cf_sk->sk);
682write_error_no_unlock:
683 return ret;
684}
685
686static unsigned int caif_poll(struct file *file, struct socket *sock,
687 poll_table *wait)
688{
689 struct sock *sk = sock->sk;
690 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
691 u32 mask = 0;
692 poll_wait(file, sk_sleep(sk), wait);
693 lock_sock(&(cf_sk->sk));
694 if (!STATE_IS_OPEN(cf_sk)) {
695 if (!STATE_IS_PENDING(cf_sk))
696 mask |= POLLHUP;
697 } else {
698 if (cfpkt_qpeek(cf_sk->pktq) != NULL)
699 mask |= (POLLIN | POLLRDNORM);
700 if (TX_FLOW_IS_ON(cf_sk))
701 mask |= (POLLOUT | POLLWRNORM);
702 }
703 release_sock(&cf_sk->sk);
704 trace_printk("CAIF: %s(): poll mask=0x%04x\n",
705 __func__, mask);
706 return mask;
707}
708
709static void drain_queue(struct caifsock *cf_sk)
710{
711 struct cfpkt *pkt = NULL;
712
713 /* Empty the queue */
714 do {
715 /* The queue has its own lock */
716 if (!cf_sk->pktq)
717 break;
718
719 pkt = cfpkt_dequeue(cf_sk->pktq);
720 if (!pkt)
721 break;
722 pr_debug("CAIF: %s(): freeing packet from read queue\n",
723 __func__);
724 cfpkt_destroy(pkt);
725
726 } while (1);
727
728 cf_sk->read_queue_len = 0;
729}
730
731static int setsockopt(struct socket *sock,
732 int lvl, int opt, char __user *ov, unsigned int ol)
733{
734 struct sock *sk = sock->sk;
735 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
736 int prio, linksel;
737 struct ifreq ifreq;
738
739 if (STATE_IS_OPEN(cf_sk)) {
740 pr_debug("CAIF: %s(): setsockopt "
741 "cannot be done on a connected socket\n",
742 __func__);
743 return -ENOPROTOOPT;
744 }
745 switch (opt) {
746 case CAIFSO_LINK_SELECT:
747 if (ol < sizeof(int)) {
748			pr_debug("CAIF: %s(): setsockopt"
749				" CAIFSO_LINK_SELECT bad size\n", __func__);
750 return -EINVAL;
751 }
752 if (lvl != SOL_CAIF)
753 goto bad_sol;
754 if (copy_from_user(&linksel, ov, sizeof(int)))
755			return -EFAULT;
756 lock_sock(&(cf_sk->sk));
757 cf_sk->conn_req.link_selector = linksel;
758 release_sock(&cf_sk->sk);
759 return 0;
760
761 case SO_PRIORITY:
762 if (lvl != SOL_SOCKET)
763 goto bad_sol;
764 if (ol < sizeof(int)) {
765 pr_debug("CAIF: %s(): setsockopt"
766 " SO_PRIORITY bad size\n", __func__);
767 return -EINVAL;
768 }
769 if (copy_from_user(&prio, ov, sizeof(int)))
770			return -EFAULT;
771 lock_sock(&(cf_sk->sk));
772 cf_sk->conn_req.priority = prio;
773 pr_debug("CAIF: %s(): Setting sockopt priority=%d\n", __func__,
774 cf_sk->conn_req.priority);
775 release_sock(&cf_sk->sk);
776 return 0;
777
778 case SO_BINDTODEVICE:
779 if (lvl != SOL_SOCKET)
780 goto bad_sol;
781 if (ol < sizeof(struct ifreq)) {
782			pr_debug("CAIF: %s(): setsockopt"
783				" SO_BINDTODEVICE bad size\n", __func__);
784 return -EINVAL;
785 }
786 if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
787 return -EFAULT;
788 lock_sock(&(cf_sk->sk));
789 strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
790 sizeof(cf_sk->conn_req.link_name));
791 cf_sk->conn_req.link_name
792 [sizeof(cf_sk->conn_req.link_name)-1] = 0;
793 release_sock(&cf_sk->sk);
794 return 0;
795
796 case CAIFSO_REQ_PARAM:
797 if (lvl != SOL_CAIF)
798 goto bad_sol;
799 if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
800 return -ENOPROTOOPT;
801 if (ol > sizeof(cf_sk->conn_req.param.data))
802 goto req_param_bad_size;
803
804 lock_sock(&(cf_sk->sk));
805 cf_sk->conn_req.param.size = ol;
806 if (copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
807 release_sock(&cf_sk->sk);
808req_param_bad_size:
809			pr_debug("CAIF: %s(): setsockopt"
810				" CAIFSO_REQ_PARAM bad size\n", __func__);
811 return -EINVAL;
812 }
813
814 release_sock(&cf_sk->sk);
815 return 0;
816
817 default:
818 pr_debug("CAIF: %s(): unhandled option %d\n", __func__, opt);
819 return -EINVAL;
820 }
821
822 return 0;
823bad_sol:
824 pr_debug("CAIF: %s(): setsockopt bad level\n", __func__);
825 return -ENOPROTOOPT;
826
827}
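/*
 * Typical option usage before connect() (illustrative sketch; the
 * constants are those defined in linux/caif/caif_socket.h, e.g. the
 * CAIF_LINK_HIGH_BANDW selector that caif_create() sets as default):
 *
 *	int sel = CAIF_LINK_HIGH_BANDW;
 *	setsockopt(fd, SOL_CAIF, CAIFSO_LINK_SELECT, &sel, sizeof(sel));
 */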
828
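/*
 * connect() doubles as "open" for CAIF: a blocking connect sends the
 * link-setup request and sleeps until INIT_RSP/INIT_FAIL_RSP arrives;
 * with O_NONBLOCK it returns -EINPROGRESS once the setup request has
 * been issued.
 */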
829static int caif_connect(struct socket *sock, struct sockaddr *uservaddr,
830 int sockaddr_len, int flags)
831{
832 struct caifsock *cf_sk = NULL;
833 int result = -1;
834 int mode = 0;
835 int ret = -EIO;
836 struct sock *sk = sock->sk;
837 BUG_ON(sk == NULL);
838
839 cf_sk = container_of(sk, struct caifsock, sk);
840
841 trace_printk("CAIF: %s(): cf_sk=%p OPEN=%d, TX_FLOW=%d, RX_FLOW=%d\n",
842 __func__, cf_sk,
843 STATE_IS_OPEN(cf_sk),
844 TX_FLOW_IS_ON(cf_sk), RX_FLOW_IS_ON(cf_sk));
845
846
847 if (sock->type == SOCK_SEQPACKET || sock->type == SOCK_STREAM)
848 sock->state = SS_CONNECTING;
849 else
850 goto out;
851
852 /* I want to be alone on cf_sk (except status and queue) */
853 lock_sock(&(cf_sk->sk));
854
855 if (sockaddr_len != sizeof(struct sockaddr_caif)) {
856 pr_debug("CAIF: %s(): Bad address len (%ld,%lu)\n",
857 __func__, (long) sockaddr_len,
858			(unsigned long) sizeof(struct sockaddr_caif));
859 ret = -EINVAL;
860 goto open_error;
861 }
862
863 if (uservaddr->sa_family != AF_CAIF) {
864 pr_debug("CAIF: %s(): Bad address family (%d)\n",
865 __func__, uservaddr->sa_family);
866 ret = -EAFNOSUPPORT;
867 goto open_error;
868 }
869
870 memcpy(&cf_sk->conn_req.sockaddr, uservaddr,
871 sizeof(struct sockaddr_caif));
872
873 dbfs_atomic_inc(&cnt.num_open);
874 mode = SKT_READ_FLAG | SKT_WRITE_FLAG;
875
876 /* If socket is not open, make sure socket is in fully closed state */
877 if (!STATE_IS_OPEN(cf_sk)) {
878 /* Has link close response been received (if we ever sent it)?*/
879 if (STATE_IS_PENDING(cf_sk)) {
880 /*
881 * Still waiting for close response from remote.
882 * If opened non-blocking, report "would block"
883 */
884 if (flags & O_NONBLOCK) {
885 pr_debug("CAIF: %s(): O_NONBLOCK"
886 " && close pending\n", __func__);
887 ret = -EAGAIN;
888 goto open_error;
889 }
890
891 pr_debug("CAIF: %s(): Wait for close response"
892 " from remote...\n", __func__);
893
894 release_sock(&cf_sk->sk);
895
896 /*
897 * Blocking mode; close is pending and we need to wait
898 * for its conclusion.
899 */
900 result =
901 wait_event_interruptible(*cf_sk->sk.sk_sleep,
902 !STATE_IS_PENDING(cf_sk));
903
904 lock_sock(&(cf_sk->sk));
905 if (result == -ERESTARTSYS) {
906 pr_debug("CAIF: %s(): wait_event_interruptible"
907				" woken by a signal (1)", __func__);
908 ret = -ERESTARTSYS;
909 goto open_error;
910 }
911 }
912 }
913
914 /* socket is now either closed, pending open or open */
915 if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) {
916 /* Open */
917 pr_debug("CAIF: %s(): Socket is already opened (cf_sk=%p)"
918 " check access f_flags = 0x%x file_mode = 0x%x\n",
919 __func__, cf_sk, mode, cf_sk->file_mode);
920
921 } else {
922 /* We are closed or pending open.
923 * If closed: send link setup
924 * If pending open: link setup already sent (we could have been
925 * interrupted by a signal last time)
926 */
927 if (!STATE_IS_OPEN(cf_sk)) {
928 /* First opening of file; connect lower layers: */
929 /* Drain queue (very unlikely) */
930 drain_queue(cf_sk);
931
932 cf_sk->layer.receive = caif_sktrecv_cb;
933 SET_STATE_OPEN(cf_sk);
934 SET_PENDING_ON(cf_sk);
935
936 /* Register this channel. */
937 result =
938 caif_connect_client(&cf_sk->conn_req,
939 &cf_sk->layer);
940 if (result < 0) {
941 pr_debug("CAIF: %s(): can't register channel\n",
942 __func__);
943 ret = -EIO;
944 SET_STATE_CLOSED(cf_sk);
945 SET_PENDING_OFF(cf_sk);
946 goto open_error;
947 }
948 dbfs_atomic_inc(&cnt.num_init);
949 }
950
951 /* If opened non-blocking, report "success".
952 */
953 if (flags & O_NONBLOCK) {
954 pr_debug("CAIF: %s(): O_NONBLOCK success\n",
955 __func__);
956 ret = -EINPROGRESS;
957 cf_sk->sk.sk_err = -EINPROGRESS;
958 goto open_error;
959 }
960
961 trace_printk("CAIF: %s(): Wait for connect response\n",
962 __func__);
963
964 /* release lock before waiting */
965 release_sock(&cf_sk->sk);
966
967 result =
968 wait_event_interruptible(*cf_sk->sk.sk_sleep,
969 !STATE_IS_PENDING(cf_sk));
970
971 lock_sock(&(cf_sk->sk));
972
973 if (result == -ERESTARTSYS) {
974 pr_debug("CAIF: %s(): wait_event_interruptible"
975				" woken by a signal (2)", __func__);
976 ret = -ERESTARTSYS;
977 goto open_error;
978 }
979
980 if (!STATE_IS_OPEN(cf_sk)) {
981 /* Lower layers said "no" */
982 pr_debug("CAIF: %s(): Closed received\n", __func__);
983 ret = -EPIPE;
984 goto open_error;
985 }
986
987 trace_printk("CAIF: %s(): Connect received\n", __func__);
988 }
989 /* Open is ok */
990 cf_sk->file_mode |= mode;
991
992 trace_printk("CAIF: %s(): Connected - file mode = %x\n",
993 __func__, cf_sk->file_mode);
994
995 release_sock(&cf_sk->sk);
996 return 0;
997open_error:
998 sock->state = SS_UNCONNECTED;
999 release_sock(&cf_sk->sk);
1000out:
1001 return ret;
1002}
1003
1004static int caif_shutdown(struct socket *sock, int how)
1005{
1006 struct caifsock *cf_sk = NULL;
1007 int result = 0;
1008 int tx_flow_state_was_on;
1009 struct sock *sk = sock->sk;
1010
1011 trace_printk("CAIF: %s(): enter\n", __func__);
1012 pr_debug("f_flags=%x\n", sock->file->f_flags);
1013
1014 if (how != SHUT_RDWR)
1015 return -EOPNOTSUPP;
1016
1017 cf_sk = container_of(sk, struct caifsock, sk);
1018 if (cf_sk == NULL) {
1019 pr_debug("CAIF: %s(): COULD NOT FIND SOCKET\n", __func__);
1020 return -EBADF;
1021 }
1022
1023 /* I want to be alone on cf_sk (except status queue) */
1024 lock_sock(&(cf_sk->sk));
1025 sock_hold(&cf_sk->sk);
1026
1027	/* IS_CLOSED has a double meaning:
1028	 * 1) Spontaneous Remote Shutdown Request.
1029	 * 2) Ack on a channel teardown (disconnect).
1030	 * Must clear bit in case we previously received
1031	 * a remote shutdown request.
1032	 */
1033 if (STATE_IS_OPEN(cf_sk) && !STATE_IS_PENDING(cf_sk)) {
1034 SET_STATE_CLOSED(cf_sk);
1035 SET_PENDING_ON(cf_sk);
1036 tx_flow_state_was_on = TX_FLOW_IS_ON(cf_sk);
1037 SET_TX_FLOW_OFF(cf_sk);
1038
1039 /* Hold the socket until DEINIT_RSP is received */
1040 sock_hold(&cf_sk->sk);
1041 result = caif_disconnect_client(&cf_sk->layer);
1042
1043 if (result < 0) {
1044 pr_debug("CAIF: %s(): "
1045 "caif_disconnect_client() failed\n",
1046 __func__);
1047 SET_STATE_CLOSED(cf_sk);
1048 SET_PENDING_OFF(cf_sk);
1049 SET_TX_FLOW_OFF(cf_sk);
1050 release_sock(&cf_sk->sk);
1051 sock_put(&cf_sk->sk);
1052 return -EIO;
1053 }
1054
1055 }
1056 if (STATE_IS_REMOTE_SHUTDOWN(cf_sk)) {
1057 SET_PENDING_OFF(cf_sk);
1058 SET_REMOTE_SHUTDOWN_OFF(cf_sk);
1059 }
1060
1061 /*
1062 * Socket is no longer in state pending close,
1063 * and we can release the reference.
1064 */
1065
1066 dbfs_atomic_inc(&cnt.num_close);
1067 drain_queue(cf_sk);
1068 SET_RX_FLOW_ON(cf_sk);
1069 cf_sk->file_mode = 0;
1070 sock_put(&cf_sk->sk);
1071 release_sock(&cf_sk->sk);
1072 if (!result && (sock->file->f_flags & O_NONBLOCK)) {
1073		pr_debug("nonblocking shutdown returning -EAGAIN\n");
1074 return -EAGAIN;
1075 } else
1076 return result;
1077}
1078
1079static ssize_t caif_sock_no_sendpage(struct socket *sock,
1080 struct page *page,
1081 int offset, size_t size, int flags)
1082{
1083 return -EOPNOTSUPP;
1084}
1085
1086/* This function is called as part of close. */
1087static int caif_release(struct socket *sock)
1088{
1089 struct sock *sk = sock->sk;
1090 struct caifsock *cf_sk = NULL;
1091 int res;
1092 caif_assert(sk != NULL);
1093 cf_sk = container_of(sk, struct caifsock, sk);
1094
1095 if (cf_sk->debugfs_socket_dir != NULL)
1096 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
1097
1098 res = caif_shutdown(sock, SHUT_RDWR);
1099 if (res && res != -EINPROGRESS)
1100 return res;
1101
1102 /*
1103 * FIXME: Shutdown should probably be possible to do async
1104 * without flushing queues, allowing reception of frames while
1105 * waiting for DEINIT_IND.
1106 * Release should always block, to allow secure decoupling of
1107 * CAIF stack.
1108 */
1109 if (!(sock->file->f_flags & O_NONBLOCK)) {
1110 res = wait_event_interruptible(*cf_sk->sk.sk_sleep,
1111 !STATE_IS_PENDING(cf_sk));
1112
1113 if (res == -ERESTARTSYS) {
1114 pr_debug("CAIF: %s(): wait_event_interruptible"
1115				" woken by a signal (1)", __func__);
1116 }
1117 }
1118 lock_sock(&(cf_sk->sk));
1119
1120 sock->sk = NULL;
1121
1122 /* Detach the socket from its process context by making it orphan. */
1123 sock_orphan(sk);
1124
1125 /*
1126 * Setting SHUTDOWN_MASK means that both send and receive are shutdown
1127 * for the socket.
1128 */
1129 sk->sk_shutdown = SHUTDOWN_MASK;
1130
1131 /*
1132 * Set the socket state to closed, the TCP_CLOSE macro is used when
1133 * closing any socket.
1134 */
1135
1136	/* Flush out this socket's receive queue. */
1137 drain_queue(cf_sk);
1138
1139 /* Finally release the socket. */
1140 SET_STATE_PENDING_DESTROY(cf_sk);
1141
1142 release_sock(&cf_sk->sk);
1143
1144 sock_put(sk);
1145
1146 /*
1147 * The rest of the cleanup will be handled from the
1148 * caif_sock_destructor
1149 */
1150 return res;
1151}
1152
1153static const struct proto_ops caif_ops = {
1154 .family = PF_CAIF,
1155 .owner = THIS_MODULE,
1156 .release = caif_release,
1157 .bind = sock_no_bind,
1158 .connect = caif_connect,
1159 .socketpair = sock_no_socketpair,
1160 .accept = sock_no_accept,
1161 .getname = sock_no_getname,
1162 .poll = caif_poll,
1163 .ioctl = sock_no_ioctl,
1164 .listen = sock_no_listen,
1165 .shutdown = caif_shutdown,
1166 .setsockopt = setsockopt,
1167 .getsockopt = sock_no_getsockopt,
1168 .sendmsg = caif_sendmsg,
1169 .recvmsg = caif_recvmsg,
1170 .mmap = sock_no_mmap,
1171 .sendpage = caif_sock_no_sendpage,
1172};
1173
1174/* This function is called when a socket is finally destroyed. */
1175static void caif_sock_destructor(struct sock *sk)
1176{
1177 struct caifsock *cf_sk = NULL;
1178 cf_sk = container_of(sk, struct caifsock, sk);
1179 /* Error checks. */
1180 caif_assert(!atomic_read(&sk->sk_wmem_alloc));
1181 caif_assert(sk_unhashed(sk));
1182 caif_assert(!sk->sk_socket);
1183 if (!sock_flag(sk, SOCK_DEAD)) {
1184		pr_debug("CAIF: %s(): socket %p not dead\n", __func__, sk);
1185 return;
1186 }
1187
1188 if (STATE_IS_OPEN(cf_sk)) {
1189 pr_debug("CAIF: %s(): socket is opened (cf_sk=%p)"
1190 " file_mode = 0x%x\n", __func__,
1191 cf_sk, cf_sk->file_mode);
1192 return;
1193 }
1194 drain_queue(cf_sk);
1195 kfree(cf_sk->pktq);
1196
1197 trace_printk("CAIF: %s(): caif_sock_destructor: Removing socket %s\n",
1198 __func__, cf_sk->name);
1199 atomic_dec(&caif_nr_socks);
1200}
1201
1202static int caif_create(struct net *net, struct socket *sock, int protocol,
1203 int kern)
1204{
1205 struct sock *sk = NULL;
1206 struct caifsock *cf_sk = NULL;
1207 int result = 0;
1208 static struct proto prot = {.name = "PF_CAIF",
1209 .owner = THIS_MODULE,
1210 .obj_size = sizeof(struct caifsock),
1211 };
1212
1213 /*
1214 * The sock->type specifies the socket type to use.
1215	 * In SEQPACKET mode packet boundaries are enforced.
1216 */
1217 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1218 return -ESOCKTNOSUPPORT;
1219
1220 if (net != &init_net)
1221 return -EAFNOSUPPORT;
1222
1223 if (protocol < 0 || protocol >= CAIFPROTO_MAX)
1224 return -EPROTONOSUPPORT;
1225 /*
1226 * Set the socket state to unconnected. The socket state is really
1227 * not used at all in the net/core or socket.c but the
1228 * initialization makes sure that sock->state is not uninitialized.
1229 */
1230 sock->state = SS_UNCONNECTED;
1231
1232 sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
1233 if (!sk)
1234 return -ENOMEM;
1235
1236 cf_sk = container_of(sk, struct caifsock, sk);
1237
1238 /* Store the protocol */
1239 sk->sk_protocol = (unsigned char) protocol;
1240
1241 spin_lock_init(&cf_sk->read_queue_len_lock);
1242
1243 /* Fill in some information concerning the misc socket. */
1244 snprintf(cf_sk->name, sizeof(cf_sk->name), "cf_sk%d",
1245 atomic_read(&caif_nr_socks));
1246
1247 /*
1248 * Lock in order to try to stop someone from opening the socket
1249 * too early.
1250 */
1251 lock_sock(&(cf_sk->sk));
1252
1253	/* Initialize the nonzero default sock structure data. */
1254 sock_init_data(sock, sk);
1255 sock->ops = &caif_ops;
1256 sk->sk_destruct = caif_sock_destructor;
1257 sk->sk_sndbuf = caif_sockbuf_size;
1258 sk->sk_rcvbuf = caif_sockbuf_size;
1259
1260 cf_sk->pktq = cfpktq_create();
1261
1262 if (!cf_sk->pktq) {
1263 pr_err("CAIF: %s(): queue create failed.\n", __func__);
1264 result = -ENOMEM;
1265 release_sock(&cf_sk->sk);
1266 goto err_failed;
1267 }
1268 cf_sk->layer.ctrlcmd = caif_sktflowctrl_cb;
1269 SET_STATE_CLOSED(cf_sk);
1270 SET_PENDING_OFF(cf_sk);
1271 SET_TX_FLOW_OFF(cf_sk);
1272 SET_RX_FLOW_ON(cf_sk);
1273
1274 /* Set default options on configuration */
1275 cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
1276 cf_sk->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
1277 cf_sk->conn_req.protocol = protocol;
1278 /* Increase the number of sockets created. */
1279 atomic_inc(&caif_nr_socks);
1280 if (!IS_ERR(debugfsdir)) {
1281 cf_sk->debugfs_socket_dir =
1282 debugfs_create_dir(cf_sk->name, debugfsdir);
1283 debugfs_create_u32("conn_state", S_IRUSR | S_IWUSR,
1284 cf_sk->debugfs_socket_dir, &cf_sk->conn_state);
1285 debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
1286 cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
1287 debugfs_create_u32("read_queue_len", S_IRUSR | S_IWUSR,
1288 cf_sk->debugfs_socket_dir,
1289 (u32 *) &cf_sk->read_queue_len);
1290 debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
1291 cf_sk->debugfs_socket_dir,
1292 (u32 *) &cf_sk->layer.id);
1293 }
1294 release_sock(&cf_sk->sk);
1295 return 0;
1296err_failed:
1297 sk_free(sk);
1298 return result;
1299}
1300
1301static struct net_proto_family caif_family_ops = {
1302 .family = PF_CAIF,
1303 .create = caif_create,
1304 .owner = THIS_MODULE,
1305};
1306
1307static int af_caif_init(void)
1308{
1309 int err;
1310 err = sock_register(&caif_family_ops);
1311
1312	if (err)
1313		return err;
1314
1315 return 0;
1316}
1317
1318static int __init caif_sktinit_module(void)
1319{
1320 int stat;
1321#ifdef CONFIG_DEBUG_FS
1322 debugfsdir = debugfs_create_dir("chnl_skt", NULL);
1323 if (!IS_ERR(debugfsdir)) {
1324 debugfs_create_u32("skb_inuse", S_IRUSR | S_IWUSR,
1325 debugfsdir,
1326 (u32 *) &cnt.skb_in_use);
1327 debugfs_create_u32("skb_alloc", S_IRUSR | S_IWUSR,
1328 debugfsdir,
1329 (u32 *) &cnt.skb_alloc);
1330 debugfs_create_u32("skb_free", S_IRUSR | S_IWUSR,
1331 debugfsdir,
1332 (u32 *) &cnt.skb_free);
1333 debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
1334 debugfsdir,
1335 (u32 *) &caif_nr_socks);
1336 debugfs_create_u32("num_open", S_IRUSR | S_IWUSR,
1337 debugfsdir,
1338 (u32 *) &cnt.num_open);
1339 debugfs_create_u32("num_close", S_IRUSR | S_IWUSR,
1340 debugfsdir,
1341 (u32 *) &cnt.num_close);
1342 debugfs_create_u32("num_init", S_IRUSR | S_IWUSR,
1343 debugfsdir,
1344 (u32 *) &cnt.num_init);
1345 debugfs_create_u32("num_init_resp", S_IRUSR | S_IWUSR,
1346 debugfsdir,
1347 (u32 *) &cnt.num_init_resp);
1348 debugfs_create_u32("num_init_fail_resp", S_IRUSR | S_IWUSR,
1349 debugfsdir,
1350 (u32 *) &cnt.num_init_fail_resp);
1351 debugfs_create_u32("num_deinit", S_IRUSR | S_IWUSR,
1352 debugfsdir,
1353 (u32 *) &cnt.num_deinit);
1354 debugfs_create_u32("num_deinit_resp", S_IRUSR | S_IWUSR,
1355 debugfsdir,
1356 (u32 *) &cnt.num_deinit_resp);
1357 debugfs_create_u32("num_remote_shutdown_ind",
1358 S_IRUSR | S_IWUSR, debugfsdir,
1359 (u32 *) &cnt.num_remote_shutdown_ind);
1360 debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
1361 debugfsdir,
1362 (u32 *) &cnt.num_tx_flow_off_ind);
1363 debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
1364 debugfsdir,
1365 (u32 *) &cnt.num_tx_flow_on_ind);
1366 debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
1367 debugfsdir,
1368 (u32 *) &cnt.num_rx_flow_off);
1369 debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
1370 debugfsdir,
1371 (u32 *) &cnt.num_rx_flow_on);
1372 }
1373#endif
1374 stat = af_caif_init();
1375 if (stat) {
1376 pr_err("CAIF: %s(): Failed to initialize CAIF socket layer.",
1377 __func__);
1378 return stat;
1379 }
1380 return 0;
1381}
1382
1383static void __exit caif_sktexit_module(void)
1384{
1385 sock_unregister(PF_CAIF);
1386 if (debugfsdir != NULL)
1387 debugfs_remove_recursive(debugfsdir);
1388}
1389
1390module_init(caif_sktinit_module);
1391module_exit(caif_sktexit_module);
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
new file mode 100644
index 000000000000..c873e3d4387c
--- /dev/null
+++ b/net/caif/cfcnfg.c
@@ -0,0 +1,530 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6#include <linux/kernel.h>
7#include <linux/stddef.h>
8#include <linux/slab.h>
9#include <net/caif/caif_layer.h>
10#include <net/caif/cfpkt.h>
11#include <net/caif/cfcnfg.h>
12#include <net/caif/cfctrl.h>
13#include <net/caif/cfmuxl.h>
14#include <net/caif/cffrml.h>
15#include <net/caif/cfserl.h>
16#include <net/caif/cfsrvl.h>
17
18#include <linux/module.h>
19#include <asm/atomic.h>
20
21#define MAX_PHY_LAYERS 7
22#define PHY_NAME_LEN 20
23
24#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
25
26/* Information about CAIF physical interfaces held by Config Module in order
27 * to manage physical interfaces
28 */
29struct cfcnfg_phyinfo {
30 /* Pointer to the layer below the MUX (framing layer) */
31 struct cflayer *frm_layer;
32 /* Pointer to the lowest actual physical layer */
33 struct cflayer *phy_layer;
34 /* Unique identifier of the physical interface */
35 unsigned int id;
 36	/* Preference of the physical interface */
37 enum cfcnfg_phy_preference pref;
38
39 /* Reference count, number of channels using the device */
40 int phy_ref_count;
41
42 /* Information about the physical device */
43 struct dev_info dev_info;
44};
45
46struct cfcnfg {
47 struct cflayer layer;
48 struct cflayer *ctrl;
49 struct cflayer *mux;
50 u8 last_phyid;
51 struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
52};
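/*
 * Per-connection layer stack assembled by this module (top to bottom):
 *
 *	adaptation layer (e.g. the socket layer)
 *	service layer (VEI/datagram/RFM/utility/video/debug)
 *	MUX layer
 *	framing layer (FCS checking)
 *	[serialization layer, CFPHYTYPE_FRAG only]
 *	physical interface driver
 *
 * See cncfg_linkup_rsp() and cfcnfg_add_phy_layer() below.
 */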
53
54static void cncfg_linkup_rsp(struct cflayer *layer, u8 linkid,
55 enum cfctrl_srv serv, u8 phyid,
56 struct cflayer *adapt_layer);
57static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid,
58 struct cflayer *client_layer);
59static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid,
60 struct cflayer *adapt_layer);
61static void cfctrl_resp_func(void);
62static void cfctrl_enum_resp(void);
63
64struct cfcnfg *cfcnfg_create(void)
65{
66 struct cfcnfg *this;
67 struct cfctrl_rsp *resp;
 68	/* Initialize this layer */
 69	this = kzalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
 70	if (!this) {
 71		pr_warning("CAIF: %s(): Out of memory\n", __func__);
 72		return NULL;
 73	}
75 this->mux = cfmuxl_create();
76 if (!this->mux)
77 goto out_of_mem;
78 this->ctrl = cfctrl_create();
79 if (!this->ctrl)
80 goto out_of_mem;
81 /* Initiate response functions */
82 resp = cfctrl_get_respfuncs(this->ctrl);
83 resp->enum_rsp = cfctrl_enum_resp;
84 resp->linkerror_ind = cfctrl_resp_func;
85 resp->linkdestroy_rsp = cncfg_linkdestroy_rsp;
86 resp->sleep_rsp = cfctrl_resp_func;
87 resp->wake_rsp = cfctrl_resp_func;
88 resp->restart_rsp = cfctrl_resp_func;
89 resp->radioset_rsp = cfctrl_resp_func;
90 resp->linksetup_rsp = cncfg_linkup_rsp;
91 resp->reject_rsp = cncfg_reject_rsp;
92
93 this->last_phyid = 1;
94
95 cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
96 layer_set_dn(this->ctrl, this->mux);
97 layer_set_up(this->ctrl, this);
98 return this;
99out_of_mem:
100 pr_warning("CAIF: %s(): Out of memory\n", __func__);
101 kfree(this->mux);
102 kfree(this->ctrl);
103 kfree(this);
104 return NULL;
105}
106EXPORT_SYMBOL(cfcnfg_create);
107
108void cfcnfg_remove(struct cfcnfg *cfg)
109{
110 if (cfg) {
111 kfree(cfg->mux);
112 kfree(cfg->ctrl);
113 kfree(cfg);
114 }
115}
116
117static void cfctrl_resp_func(void)
118{
119}
120
121static void cfctrl_enum_resp(void)
122{
123}
124
125struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
126 enum cfcnfg_phy_preference phy_pref)
127{
128 u16 i;
129
130 /* Try to match with specified preference */
131 for (i = 1; i < MAX_PHY_LAYERS; i++) {
132 if (cnfg->phy_layers[i].id == i &&
133 cnfg->phy_layers[i].pref == phy_pref &&
134 cnfg->phy_layers[i].frm_layer != NULL) {
135 caif_assert(cnfg->phy_layers != NULL);
136 caif_assert(cnfg->phy_layers[i].id == i);
137 return &cnfg->phy_layers[i].dev_info;
138 }
139 }
140 /* Otherwise just return something */
141 for (i = 1; i < MAX_PHY_LAYERS; i++) {
142 if (cnfg->phy_layers[i].id == i) {
143 caif_assert(cnfg->phy_layers != NULL);
144 caif_assert(cnfg->phy_layers[i].id == i);
145 return &cnfg->phy_layers[i].dev_info;
146 }
147 }
148
149 return NULL;
150}
151
152static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
153 u8 phyid)
154{
155 int i;
156 /* Try to match with specified preference */
157 for (i = 0; i < MAX_PHY_LAYERS; i++)
158 if (cnfg->phy_layers[i].frm_layer != NULL &&
159 cnfg->phy_layers[i].id == phyid)
160 return &cnfg->phy_layers[i];
161 return NULL;
162}
163
164int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
165{
166 int i;
167
168 /* Try to match with specified name */
169 for (i = 0; i < MAX_PHY_LAYERS; i++) {
170 if (cnfg->phy_layers[i].frm_layer != NULL
171 && strcmp(cnfg->phy_layers[i].phy_layer->name,
172 name) == 0)
173 return cnfg->phy_layers[i].frm_layer->id;
174 }
175 return 0;
176}
177
178/*
179 * NOTE: What happens on destroy failure:
180 * 1a) No response - Too early
181 *	 This will not happen because enumeration has already
182 *	 completed.
183 * 1b) No response - FATAL
184 * Not handled, but this should be a CAIF PROTOCOL ERROR
185 * Modem error, response is really expected - this
186 * case is not really handled.
187 * 2) O/E-bit indicates an error
188 * Ignored - this link is destroyed anyway.
189 * 3) Not able to match on request
190 * Not handled, but this should be a CAIF PROTOCOL ERROR
191 * 4) Link-Error - (no response)
192 * Not handled, but this should be a CAIF PROTOCOL ERROR
193 */
194
195int cfcnfg_del_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
196{
197 u8 channel_id = 0;
198 int ret = 0;
199 struct cfcnfg_phyinfo *phyinfo = NULL;
200 u8 phyid = 0;
201
202 caif_assert(adap_layer != NULL);
203 channel_id = adap_layer->id;
204 if (channel_id == 0) {
205 pr_err("CAIF: %s():adap_layer->id is 0\n", __func__);
206 ret = -ENOTCONN;
207 goto end;
208 }
209
210 if (adap_layer->dn == NULL) {
211 pr_err("CAIF: %s():adap_layer->dn is NULL\n", __func__);
212 ret = -ENODEV;
213 goto end;
214 }
215
216	phyid = cfsrvl_getphyid(adap_layer->dn);
218
219 phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
220 if (phyinfo == NULL) {
221 pr_warning("CAIF: %s(): No interface to send disconnect to\n",
222 __func__);
223 ret = -ENODEV;
224 goto end;
225 }
226
227 if (phyinfo->id != phyid
228 || phyinfo->phy_layer->id != phyid
229 || phyinfo->frm_layer->id != phyid) {
230
231 pr_err("CAIF: %s(): Inconsistency in phy registration\n",
232 __func__);
233 ret = -EINVAL;
234 goto end;
235 }
236
237 ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
238
239end:
240 if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
241 phyinfo->phy_layer != NULL &&
242 phyinfo->phy_layer->modemcmd != NULL) {
243 phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
244 _CAIF_MODEMCMD_PHYIF_USELESS);
245 }
246 return ret;
247
248}
249EXPORT_SYMBOL(cfcnfg_del_adapt_layer);
250
251static void cncfg_linkdestroy_rsp(struct cflayer *layer, u8 linkid,
252 struct cflayer *client_layer)
253{
254 struct cfcnfg *cnfg = container_obj(layer);
255 struct cflayer *servl;
256
257	/*
258	 * 1) Remove the service from the MUX layer. The MUX must
259	 * guarantee that no more payload is sent "upwards" (receive).
260	 */
261 servl = cfmuxl_remove_uplayer(cnfg->mux, linkid);
262
263 if (servl == NULL) {
264 pr_err("CAIF: %s(): PROTOCOL ERROR "
265 "- Error removing service_layer Linkid(%d)",
266 __func__, linkid);
267 return;
268 }
269 caif_assert(linkid == servl->id);
270
271 if (servl != client_layer && servl->up != client_layer) {
272 pr_err("CAIF: %s(): Error removing service_layer "
273 "Linkid(%d) %p %p",
274 __func__, linkid, (void *) servl,
275 (void *) client_layer);
276 return;
277 }
278
279 /*
280 * 2) DEINIT_RSP must guarantee that no more packets are transmitted
281 * from client (adap_layer) when it returns.
282 */
283
284 if (servl->ctrlcmd == NULL) {
285 pr_err("CAIF: %s(): Error servl->ctrlcmd == NULL", __func__);
286 return;
287 }
288
289 servl->ctrlcmd(servl, CAIF_CTRLCMD_DEINIT_RSP, 0);
290
291 /* 3) It is now safe to destroy the service layer. */
292 cfservl_destroy(servl);
293}
294
295/*
296 * NOTE: What happens on linksetup failure:
297 * 1a) No response - Too early
298 *	 This will not happen because enumeration is performed
299 *	 before the interface is used.
300 * 1b) No response - FATAL
301 * Not handled, but this should be a CAIF PROTOCOL ERROR
302 * Modem error, response is really expected - this case is
303 * not really handled.
304 * 2) O/E-bit indicates an error
305 * Handled in cnfg_reject_rsp
306 * 3) Not able to match on request
307 * Not handled, but this should be a CAIF PROTOCOL ERROR
308 * 4) Link-Error - (no response)
309 * Not handled, but this should be a CAIF PROTOCOL ERROR
310 */
311
312int
313cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
314 struct cfctrl_link_param *param,
315 struct cflayer *adap_layer)
316{
317 struct cflayer *frml;
318 if (adap_layer == NULL) {
319		pr_err("CAIF: %s(): adap_layer is NULL", __func__);
320 return -EINVAL;
321 }
322 if (adap_layer->receive == NULL) {
323 pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__);
324 return -EINVAL;
325 }
326 if (adap_layer->ctrlcmd == NULL) {
327 pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__);
328 return -EINVAL;
329 }
330 frml = cnfg->phy_layers[param->phyid].frm_layer;
331 if (frml == NULL) {
332 pr_err("CAIF: %s(): Specified PHY type does not exist!",
333 __func__);
334 return -ENODEV;
335 }
336 caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
337 caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id ==
338 param->phyid);
339 caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
340 param->phyid);
341 /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
342 cfctrl_enum_req(cnfg->ctrl, param->phyid);
343 cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
344 return 0;
345}
346EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);
347
348static void cncfg_reject_rsp(struct cflayer *layer, u8 linkid,
349 struct cflayer *adapt_layer)
350{
351 if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
352 adapt_layer->ctrlcmd(adapt_layer,
353 CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
354}
355
356static void
357cncfg_linkup_rsp(struct cflayer *layer, u8 linkid, enum cfctrl_srv serv,
358 u8 phyid, struct cflayer *adapt_layer)
359{
360 struct cfcnfg *cnfg = container_obj(layer);
361 struct cflayer *servicel = NULL;
362 struct cfcnfg_phyinfo *phyinfo;
363 if (adapt_layer == NULL) {
364 pr_err("CAIF: %s(): PROTOCOL ERROR "
365 "- LinkUp Request/Response did not match\n", __func__);
366 return;
367 }
368
369 caif_assert(cnfg != NULL);
370 caif_assert(phyid != 0);
371 phyinfo = &cnfg->phy_layers[phyid];
372 caif_assert(phyinfo != NULL);
373 caif_assert(phyinfo->id == phyid);
374 caif_assert(phyinfo->phy_layer != NULL);
375 caif_assert(phyinfo->phy_layer->id == phyid);
376
377 if (phyinfo != NULL &&
378 phyinfo->phy_ref_count++ == 0 &&
379 phyinfo->phy_layer != NULL &&
380 phyinfo->phy_layer->modemcmd != NULL) {
381 caif_assert(phyinfo->phy_layer->id == phyid);
382 phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
383 _CAIF_MODEMCMD_PHYIF_USEFULL);
384
385 }
386 adapt_layer->id = linkid;
387
388 switch (serv) {
389 case CFCTRL_SRV_VEI:
390 servicel = cfvei_create(linkid, &phyinfo->dev_info);
391 break;
392 case CFCTRL_SRV_DATAGRAM:
393 servicel = cfdgml_create(linkid, &phyinfo->dev_info);
394 break;
395 case CFCTRL_SRV_RFM:
396 servicel = cfrfml_create(linkid, &phyinfo->dev_info);
397 break;
398 case CFCTRL_SRV_UTIL:
399 servicel = cfutill_create(linkid, &phyinfo->dev_info);
400 break;
401 case CFCTRL_SRV_VIDEO:
402 servicel = cfvidl_create(linkid, &phyinfo->dev_info);
403 break;
404 case CFCTRL_SRV_DBG:
405 servicel = cfdbgl_create(linkid, &phyinfo->dev_info);
406 break;
407 default:
408 pr_err("CAIF: %s(): Protocol error. "
409 "Link setup response - unknown channel type\n",
410 __func__);
411 return;
412 }
413 if (!servicel) {
414 pr_warning("CAIF: %s(): Out of memory\n", __func__);
415 return;
416 }
417 layer_set_dn(servicel, cnfg->mux);
418 cfmuxl_set_uplayer(cnfg->mux, servicel, linkid);
419 layer_set_up(servicel, adapt_layer);
420 layer_set_dn(adapt_layer, servicel);
421 servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
422}
423
424void
425cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
426 void *dev, struct cflayer *phy_layer, u16 *phyid,
427 enum cfcnfg_phy_preference pref,
428 bool fcs, bool stx)
429{
430 struct cflayer *frml;
431 struct cflayer *phy_driver = NULL;
432 int i;
433
434
435 if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {
436 *phyid = cnfg->last_phyid;
437
438		/* range: 1..(MAX_PHY_LAYERS-1) */
439 cnfg->last_phyid =
440 (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
441 } else {
442 *phyid = 0;
443 for (i = 1; i < MAX_PHY_LAYERS; i++) {
444 if (cnfg->phy_layers[i].frm_layer == NULL) {
445 *phyid = i;
446 break;
447 }
448 }
449 }
450 if (*phyid == 0) {
451 pr_err("CAIF: %s(): No Available PHY ID\n", __func__);
452 return;
453 }
454
455 switch (phy_type) {
456 case CFPHYTYPE_FRAG:
457 phy_driver =
458 cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
459 if (!phy_driver) {
460 pr_warning("CAIF: %s(): Out of memory\n", __func__);
461 return;
462 }
463
464 break;
465 case CFPHYTYPE_CAIF:
466 phy_driver = NULL;
467 break;
468	default:
469		pr_err("CAIF: %s(): unknown PHY type %d\n", __func__, phy_type);
470		return;
472 }
473
474 phy_layer->id = *phyid;
475 cnfg->phy_layers[*phyid].pref = pref;
476 cnfg->phy_layers[*phyid].id = *phyid;
477 cnfg->phy_layers[*phyid].dev_info.id = *phyid;
478 cnfg->phy_layers[*phyid].dev_info.dev = dev;
479 cnfg->phy_layers[*phyid].phy_layer = phy_layer;
480 cnfg->phy_layers[*phyid].phy_ref_count = 0;
481 phy_layer->type = phy_type;
482 frml = cffrml_create(*phyid, fcs);
483 if (!frml) {
484 pr_warning("CAIF: %s(): Out of memory\n", __func__);
485 return;
486 }
487 cnfg->phy_layers[*phyid].frm_layer = frml;
488 cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
489 layer_set_up(frml, cnfg->mux);
490
491 if (phy_driver != NULL) {
492 phy_driver->id = *phyid;
493 layer_set_dn(frml, phy_driver);
494 layer_set_up(phy_driver, frml);
495 layer_set_dn(phy_driver, phy_layer);
496 layer_set_up(phy_layer, phy_driver);
497 } else {
498 layer_set_dn(frml, phy_layer);
499 layer_set_up(phy_layer, frml);
500 }
501}
502EXPORT_SYMBOL(cfcnfg_add_phy_layer);
503
504int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
505{
506 struct cflayer *frml, *frml_dn;
507 u16 phyid;
508 phyid = phy_layer->id;
509 caif_assert(phyid == cnfg->phy_layers[phyid].id);
510 caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
511 caif_assert(phy_layer->id == phyid);
512 caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);
513
514 memset(&cnfg->phy_layers[phy_layer->id], 0,
515 sizeof(struct cfcnfg_phyinfo));
516 frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
517 frml_dn = frml->dn;
518 cffrml_set_uplayer(frml, NULL);
519 cffrml_set_dnlayer(frml, NULL);
520 kfree(frml);
521
522 if (phy_layer != frml_dn) {
523 layer_set_up(frml_dn, NULL);
524 layer_set_dn(frml_dn, NULL);
525 kfree(frml_dn);
526 }
527 layer_set_up(phy_layer, NULL);
528 return 0;
529}
530EXPORT_SYMBOL(cfcnfg_del_phy_layer);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
new file mode 100644
index 000000000000..11f80140f3cb
--- /dev/null
+++ b/net/caif/cfctrl.c
@@ -0,0 +1,664 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9#include <linux/slab.h>
10#include <net/caif/caif_layer.h>
11#include <net/caif/cfpkt.h>
12#include <net/caif/cfctrl.h>
13
14#define container_obj(layr) container_of(layr, struct cfctrl, serv.layer)
15#define UTILITY_NAME_LENGTH 16
16#define CFPKT_CTRL_PKT_LEN 20
17
18
19#ifdef CAIF_NO_LOOP
20static int handle_loop(struct cfctrl *ctrl,
21 int cmd, struct cfpkt *pkt){
22 return CAIF_FAILURE;
23}
24#else
25static int handle_loop(struct cfctrl *ctrl,
26 int cmd, struct cfpkt *pkt);
27#endif
28static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
29static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
30 int phyid);
31
32
33struct cflayer *cfctrl_create(void)
34{
 35	struct cfctrl *this =
 36		kzalloc(sizeof(struct cfctrl), GFP_ATOMIC);
 37	if (!this) {
 38		pr_warning("CAIF: %s(): Out of memory\n", __func__);
 39		return NULL;
 40	}
 41	caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
43 spin_lock_init(&this->info_list_lock);
44 atomic_set(&this->req_seq_no, 1);
45 atomic_set(&this->rsp_seq_no, 1);
46 this->serv.dev_info.id = 0xff;
47 this->serv.layer.id = 0;
48 this->serv.layer.receive = cfctrl_recv;
49 sprintf(this->serv.layer.name, "ctrl");
50 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
51 spin_lock_init(&this->loop_linkid_lock);
52 this->loop_linkid = 1;
53 return &this->serv.layer;
54}
55
56static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
57{
58 bool eq =
59 p1->linktype == p2->linktype &&
60 p1->priority == p2->priority &&
61 p1->phyid == p2->phyid &&
62 p1->endpoint == p2->endpoint && p1->chtype == p2->chtype;
63
64 if (!eq)
65 return false;
66
67 switch (p1->linktype) {
68 case CFCTRL_SRV_VEI:
69 return true;
70 case CFCTRL_SRV_DATAGRAM:
71 return p1->u.datagram.connid == p2->u.datagram.connid;
72 case CFCTRL_SRV_RFM:
73 return
74 p1->u.rfm.connid == p2->u.rfm.connid &&
75 strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0;
76 case CFCTRL_SRV_UTIL:
77 return
78 p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb
79 && p1->u.utility.fifosize_bufs ==
80 p2->u.utility.fifosize_bufs
81 && strcmp(p1->u.utility.name, p2->u.utility.name) == 0
82 && p1->u.utility.paramlen == p2->u.utility.paramlen
83 && memcmp(p1->u.utility.params, p2->u.utility.params,
84 p1->u.utility.paramlen) == 0;
85
86 case CFCTRL_SRV_VIDEO:
87 return p1->u.video.connid == p2->u.video.connid;
88 case CFCTRL_SRV_DBG:
89 return true;
90 case CFCTRL_SRV_DECM:
91 return false;
92 default:
93 return false;
94 }
95 return false;
96}
97
98bool cfctrl_req_eq(struct cfctrl_request_info *r1,
99 struct cfctrl_request_info *r2)
100{
101 if (r1->cmd != r2->cmd)
102 return false;
103 if (r1->cmd == CFCTRL_CMD_LINK_SETUP)
104 return param_eq(&r1->param, &r2->param);
105 else
106 return r1->channel_id == r2->channel_id;
107}
108
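/*
 * Outstanding control requests are kept in a singly linked FIFO.
 * Responses are matched against it with cfctrl_req_eq(), and the
 * stored sequence numbers let cfctrl_remove_req() warn when responses
 * arrive out of order.
 */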
109/* Insert request at the end */
110void cfctrl_insert_req(struct cfctrl *ctrl,
111 struct cfctrl_request_info *req)
112{
113 struct cfctrl_request_info *p;
114 spin_lock(&ctrl->info_list_lock);
115 req->next = NULL;
116 atomic_inc(&ctrl->req_seq_no);
117 req->sequence_no = atomic_read(&ctrl->req_seq_no);
118 if (ctrl->first_req == NULL) {
119 ctrl->first_req = req;
120 spin_unlock(&ctrl->info_list_lock);
121 return;
122 }
123 p = ctrl->first_req;
124 while (p->next != NULL)
125 p = p->next;
126 p->next = req;
127 spin_unlock(&ctrl->info_list_lock);
128}
129
130static void cfctrl_insert_req2(struct cfctrl *ctrl, enum cfctrl_cmd cmd,
131 u8 linkid, struct cflayer *user_layer)
132{
133 struct cfctrl_request_info *req = kmalloc(sizeof(*req), GFP_KERNEL);
134 if (!req) {
135 pr_warning("CAIF: %s(): Out of memory\n", __func__);
136 return;
137 }
138 req->client_layer = user_layer;
139 req->cmd = cmd;
140 req->channel_id = linkid;
141 cfctrl_insert_req(ctrl, req);
142}
143
144/* Compare and remove request */
145struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
146 struct cfctrl_request_info *req)
147{
148 struct cfctrl_request_info *p;
149 struct cfctrl_request_info *ret;
150
151 spin_lock(&ctrl->info_list_lock);
152 if (ctrl->first_req == NULL) {
153 spin_unlock(&ctrl->info_list_lock);
154 return NULL;
155 }
156
157 if (cfctrl_req_eq(req, ctrl->first_req)) {
158 ret = ctrl->first_req;
159 caif_assert(ctrl->first_req);
160 atomic_set(&ctrl->rsp_seq_no,
161 ctrl->first_req->sequence_no);
162 ctrl->first_req = ctrl->first_req->next;
163 spin_unlock(&ctrl->info_list_lock);
164 return ret;
165 }
166
167 p = ctrl->first_req;
168
169 while (p->next != NULL) {
170 if (cfctrl_req_eq(req, p->next)) {
171 pr_warning("CAIF: %s(): Requests are not "
172 "received in order\n",
173 __func__);
174 ret = p->next;
175 atomic_set(&ctrl->rsp_seq_no,
176 p->next->sequence_no);
177 p->next = p->next->next;
178 spin_unlock(&ctrl->info_list_lock);
179 return ret;
180 }
181 p = p->next;
182 }
183 spin_unlock(&ctrl->info_list_lock);
184
185 pr_warning("CAIF: %s(): Request does not match\n",
186 __func__);
187 return NULL;
188}
189
190struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
191{
192 struct cfctrl *this = container_obj(layer);
193 return &this->res;
194}
195
196void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn)
197{
198 this->dn = dn;
199}
200
201void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up)
202{
203 this->up = up;
204}
205
206static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
207{
208 info->hdr_len = 0;
209 info->channel_id = cfctrl->serv.layer.id;
210 info->dev_info = &cfctrl->serv.dev_info;
211}
212
213void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
214{
215 struct cfctrl *cfctrl = container_obj(layer);
216 int ret;
217 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
218 if (!pkt) {
219 pr_warning("CAIF: %s(): Out of memory\n", __func__);
220 return;
221 }
222 caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
223 init_info(cfpkt_info(pkt), cfctrl);
224 cfpkt_info(pkt)->dev_info->id = physlinkid;
225 cfctrl->serv.dev_info.id = physlinkid;
226 cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
227 cfpkt_addbdy(pkt, physlinkid);
228 ret =
229 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
230 if (ret < 0) {
231 pr_err("CAIF: %s(): Could not transmit enum message\n",
232 __func__);
233 cfpkt_destroy(pkt);
234 }
235}
236
237void cfctrl_linkup_request(struct cflayer *layer,
238 struct cfctrl_link_param *param,
239 struct cflayer *user_layer)
240{
241 struct cfctrl *cfctrl = container_obj(layer);
242 u32 tmp32;
243 u16 tmp16;
244 u8 tmp8;
245 struct cfctrl_request_info *req;
246 int ret;
247 char utility_name[16];
248 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
249 if (!pkt) {
250 pr_warning("CAIF: %s(): Out of memory\n", __func__);
251 return;
252 }
253 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
254 cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype);
255 cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid);
256 cfpkt_addbdy(pkt, param->endpoint & 0x03);
257
258 switch (param->linktype) {
259 case CFCTRL_SRV_VEI:
260 break;
261 case CFCTRL_SRV_VIDEO:
262 cfpkt_addbdy(pkt, (u8) param->u.video.connid);
263 break;
264 case CFCTRL_SRV_DBG:
265 break;
266 case CFCTRL_SRV_DATAGRAM:
267 tmp32 = cpu_to_le32(param->u.datagram.connid);
268 cfpkt_add_body(pkt, &tmp32, 4);
269 break;
270 case CFCTRL_SRV_RFM:
271 /* Construct a frame, convert DatagramConnectionID to network
272 * format long and copy it out...
273 */
274 tmp32 = cpu_to_le32(param->u.rfm.connid);
275 cfpkt_add_body(pkt, &tmp32, 4);
276 /* Add volume name, including zero termination... */
277 cfpkt_add_body(pkt, param->u.rfm.volume,
278 strlen(param->u.rfm.volume) + 1);
279 break;
280 case CFCTRL_SRV_UTIL:
281 tmp16 = cpu_to_le16(param->u.utility.fifosize_kb);
282 cfpkt_add_body(pkt, &tmp16, 2);
283 tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
284 cfpkt_add_body(pkt, &tmp16, 2);
285 memset(utility_name, 0, sizeof(utility_name));
286 strncpy(utility_name, param->u.utility.name,
287 UTILITY_NAME_LENGTH - 1);
288 cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
289 tmp8 = param->u.utility.paramlen;
290 cfpkt_add_body(pkt, &tmp8, 1);
291 cfpkt_add_body(pkt, param->u.utility.params,
292 param->u.utility.paramlen);
293 break;
294 default:
295 pr_warning("CAIF: %s():Request setup of bad link type = %d\n",
296 __func__, param->linktype);
297 }
298 req = kmalloc(sizeof(*req), GFP_KERNEL);
299 if (!req) {
300 pr_warning("CAIF: %s(): Out of memory\n", __func__);
301 return;
302 }
303 memset(req, 0, sizeof(*req));
304 req->client_layer = user_layer;
305 req->cmd = CFCTRL_CMD_LINK_SETUP;
306 req->param = *param;
307 cfctrl_insert_req(cfctrl, req);
308 init_info(cfpkt_info(pkt), cfctrl);
309 cfpkt_info(pkt)->dev_info->id = param->phyid;
310 ret =
311 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
312 if (ret < 0) {
313 pr_err("CAIF: %s(): Could not transmit linksetup request\n",
314 __func__);
315 cfpkt_destroy(pkt);
316 }
317}
318
319int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
320 struct cflayer *client)
321{
322 int ret;
323 struct cfctrl *cfctrl = container_obj(layer);
324 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
325 if (!pkt) {
326 pr_warning("CAIF: %s(): Out of memory\n", __func__);
327 return -ENOMEM;
328 }
329 cfctrl_insert_req2(cfctrl, CFCTRL_CMD_LINK_DESTROY, channelid, client);
330 cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
331 cfpkt_addbdy(pkt, channelid);
332 init_info(cfpkt_info(pkt), cfctrl);
333 ret =
334 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
335 if (ret < 0) {
336 pr_err("CAIF: %s(): Could not transmit link-down request\n",
337 __func__);
338 cfpkt_destroy(pkt);
339 }
340 return ret;
341}
342
343void cfctrl_sleep_req(struct cflayer *layer)
344{
345 int ret;
346 struct cfctrl *cfctrl = container_obj(layer);
347 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
348 if (!pkt) {
349 pr_warning("CAIF: %s(): Out of memory\n", __func__);
350 return;
351 }
352 cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
353 init_info(cfpkt_info(pkt), cfctrl);
354 ret =
355 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
356 if (ret < 0)
357 cfpkt_destroy(pkt);
358}
359
360void cfctrl_wake_req(struct cflayer *layer)
361{
362 int ret;
363 struct cfctrl *cfctrl = container_obj(layer);
364 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
365 if (!pkt) {
366 pr_warning("CAIF: %s(): Out of memory\n", __func__);
367 return;
368 }
369 cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
370 init_info(cfpkt_info(pkt), cfctrl);
371 ret =
372 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
373 if (ret < 0)
374 cfpkt_destroy(pkt);
375}
376
377void cfctrl_getstartreason_req(struct cflayer *layer)
378{
379 int ret;
380 struct cfctrl *cfctrl = container_obj(layer);
381 struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
382 if (!pkt) {
383 pr_warning("CAIF: %s(): Out of memory\n", __func__);
384 return;
385 }
386 cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
387 init_info(cfpkt_info(pkt), cfctrl);
388 ret =
389 cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
390 if (ret < 0)
391 cfpkt_destroy(pkt);
392}
393
394
395static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
396{
397 u8 cmdrsp;
398 u8 cmd;
399 int ret = -1;
400 u16 tmp16;
401 u8 len;
402 u8 param[255];
403 u8 linkid;
404 struct cfctrl *cfctrl = container_obj(layer);
405 struct cfctrl_request_info rsp, *req;
406
407
408 cfpkt_extr_head(pkt, &cmdrsp, 1);
409 cmd = cmdrsp & CFCTRL_CMD_MASK;
410 if (cmd != CFCTRL_CMD_LINK_ERR
411 && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
412 if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE) {
413 pr_info("CAIF: %s() CAIF Protocol error: "
414 "Response bit not set\n", __func__);
415 goto error;
416 }
417 }
418
419 switch (cmd) {
420 case CFCTRL_CMD_LINK_SETUP:
421 {
422 enum cfctrl_srv serv;
423 enum cfctrl_srv servtype;
424 u8 endpoint;
425 u8 physlinkid;
426 u8 prio;
427 u8 tmp;
428 u32 tmp32;
429 u8 *cp;
430 int i;
431 struct cfctrl_link_param linkparam;
432 memset(&linkparam, 0, sizeof(linkparam));
433
434 cfpkt_extr_head(pkt, &tmp, 1);
435
436 serv = tmp & CFCTRL_SRV_MASK;
437 linkparam.linktype = serv;
438
439 servtype = tmp >> 4;
440 linkparam.chtype = servtype;
441
442 cfpkt_extr_head(pkt, &tmp, 1);
443 physlinkid = tmp & 0x07;
444 prio = tmp >> 3;
445
446 linkparam.priority = prio;
447 linkparam.phyid = physlinkid;
448 cfpkt_extr_head(pkt, &endpoint, 1);
449 linkparam.endpoint = endpoint & 0x03;
450
451 switch (serv) {
452 case CFCTRL_SRV_VEI:
453 case CFCTRL_SRV_DBG:
454 /* Link ID */
455 cfpkt_extr_head(pkt, &linkid, 1);
456 break;
457 case CFCTRL_SRV_VIDEO:
458 cfpkt_extr_head(pkt, &tmp, 1);
459 linkparam.u.video.connid = tmp;
460 /* Link ID */
461 cfpkt_extr_head(pkt, &linkid, 1);
462 break;
463
464 case CFCTRL_SRV_DATAGRAM:
465 cfpkt_extr_head(pkt, &tmp32, 4);
466 linkparam.u.datagram.connid =
467 le32_to_cpu(tmp32);
468 /* Link ID */
469 cfpkt_extr_head(pkt, &linkid, 1);
470 break;
471 case CFCTRL_SRV_RFM:
472 /* Extract the RFM connection ID
473  * (a 32-bit little-endian value)
474  * followed by the volume name...
475  */
476 cfpkt_extr_head(pkt, &tmp32, 4);
477 linkparam.u.rfm.connid =
478 le32_to_cpu(tmp32);
479 cp = (u8 *) linkparam.u.rfm.volume;
480 for (cfpkt_extr_head(pkt, &tmp, 1);
481 cfpkt_more(pkt) && tmp != '\0';
482 cfpkt_extr_head(pkt, &tmp, 1))
483 *cp++ = tmp;
484 *cp = '\0';
485
486 /* Link ID */
487 cfpkt_extr_head(pkt, &linkid, 1);
488
489 break;
490 case CFCTRL_SRV_UTIL:
491 /* Extract the utility-link parameters:
492  * fifo sizes, service name and the
493  * opaque parameter block...
494  */
495 /* Fifosize KB */
496 cfpkt_extr_head(pkt, &tmp16, 2);
497 linkparam.u.utility.fifosize_kb =
498 le16_to_cpu(tmp16);
499 /* Fifosize bufs */
500 cfpkt_extr_head(pkt, &tmp16, 2);
501 linkparam.u.utility.fifosize_bufs =
502 le16_to_cpu(tmp16);
503 /* name */
504 cp = (u8 *) linkparam.u.utility.name;
505 caif_assert(sizeof(linkparam.u.utility.name)
506 >= UTILITY_NAME_LENGTH);
507 for (i = 0;
508 i < UTILITY_NAME_LENGTH
509 && cfpkt_more(pkt); i++) {
510 cfpkt_extr_head(pkt, &tmp, 1);
511 *cp++ = tmp;
512 }
513 /* Length */
514 cfpkt_extr_head(pkt, &len, 1);
515 linkparam.u.utility.paramlen = len;
516 /* Param Data */
517 cp = linkparam.u.utility.params;
518 while (cfpkt_more(pkt) && len--) {
519 cfpkt_extr_head(pkt, &tmp, 1);
520 *cp++ = tmp;
521 }
522 /* Link ID */
523 cfpkt_extr_head(pkt, &linkid, 1);
524 /* Length */
525 cfpkt_extr_head(pkt, &len, 1);
526 /* Param Data */
527 cfpkt_extr_head(pkt, &param, len);
528 break;
529 default:
530 pr_warning("CAIF: %s(): Request setup "
531 "- invalid link type (%d)",
532 __func__, serv);
533 goto error;
534 }
535
536 rsp.cmd = cmd;
537 rsp.param = linkparam;
538 req = cfctrl_remove_req(cfctrl, &rsp);
539
540 if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
541 cfpkt_erroneous(pkt)) {
542 pr_err("CAIF: %s(): Invalid O/E bit or parse "
543 "error on CAIF control channel\n",
544 __func__);
545 cfctrl->res.reject_rsp(cfctrl->serv.layer.up,
546 0,
547 req ? req->client_layer
548 : NULL);
549 } else {
550 cfctrl->res.linksetup_rsp(cfctrl->serv.
551 layer.up, linkid,
552 serv, physlinkid,
553 req ? req->
554 client_layer : NULL);
555 }
556
557 if (req != NULL)
558 kfree(req);
559 }
560 break;
561 case CFCTRL_CMD_LINK_DESTROY:
562 cfpkt_extr_head(pkt, &linkid, 1);
563 rsp.cmd = cmd;
564 rsp.channel_id = linkid;
565 req = cfctrl_remove_req(cfctrl, &rsp);
566 cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid,
567 req ? req->client_layer : NULL);
568 if (req != NULL)
569 kfree(req);
570 break;
571 case CFCTRL_CMD_LINK_ERR:
572 pr_err("CAIF: %s(): Frame Error Indication received\n",
573 __func__);
574 cfctrl->res.linkerror_ind();
575 break;
576 case CFCTRL_CMD_ENUM:
577 cfctrl->res.enum_rsp();
578 break;
579 case CFCTRL_CMD_SLEEP:
580 cfctrl->res.sleep_rsp();
581 break;
582 case CFCTRL_CMD_WAKE:
583 cfctrl->res.wake_rsp();
584 break;
585 case CFCTRL_CMD_LINK_RECONF:
586 cfctrl->res.restart_rsp();
587 break;
588 case CFCTRL_CMD_RADIO_SET:
589 cfctrl->res.radioset_rsp();
590 break;
591 default:
592 pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__);
593 goto error;
594 break;
595 }
596 ret = 0;
597error:
598 cfpkt_destroy(pkt);
599 return ret;
600}
601
602static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
603 int phyid)
604{
605 struct cfctrl *this = container_obj(layr);
606 switch (ctrl) {
607 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
608 case CAIF_CTRLCMD_FLOW_OFF_IND:
609 spin_lock(&this->info_list_lock);
610 if (this->first_req != NULL) {
611 pr_warning("CAIF: %s(): Received flow off in "
612 "control layer\n", __func__);
613 }
614 spin_unlock(&this->info_list_lock);
615 break;
616 default:
617 break;
618 }
619}
620
621#ifndef CAIF_NO_LOOP
622static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
623{
624 static int last_linkid;
625 u8 linkid, linktype, tmp;
626 switch (cmd) {
627 case CFCTRL_CMD_LINK_SETUP:
628 spin_lock(&ctrl->loop_linkid_lock);
629 for (linkid = last_linkid + 1; linkid < 255; linkid++)
630 if (!ctrl->loop_linkused[linkid])
631 goto found;
632 for (linkid = last_linkid - 1; linkid > 0; linkid--)
633 if (!ctrl->loop_linkused[linkid])
634 goto found;
635 spin_unlock(&ctrl->loop_linkid_lock);
636 return -EINVAL;
637found:
638 if (!ctrl->loop_linkused[linkid])
639 ctrl->loop_linkused[linkid] = 1;
640
641 last_linkid = linkid;
642
643 cfpkt_add_trail(pkt, &linkid, 1);
644 spin_unlock(&ctrl->loop_linkid_lock);
645 cfpkt_peek_head(pkt, &linktype, 1);
646 if (linktype == CFCTRL_SRV_UTIL) {
647 tmp = 0x01;
648 cfpkt_add_trail(pkt, &tmp, 1);
649 cfpkt_add_trail(pkt, &tmp, 1);
650 }
651 break;
652
653 case CFCTRL_CMD_LINK_DESTROY:
654 spin_lock(&ctrl->loop_linkid_lock);
655 cfpkt_peek_head(pkt, &linkid, 1);
656 ctrl->loop_linkused[linkid] = 0;
657 spin_unlock(&ctrl->loop_linkid_lock);
658 break;
659 default:
660 break;
661 }
662 return CAIF_SUCCESS;
663}
664#endif
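
The cfctrl code above keeps pending requests in a hand-rolled singly linked FIFO guarded by info_list_lock, stamping each entry with a sequence number so a response can still be matched when it arrives out of order. Below is a minimal user-space sketch of that insert/remove logic; every identifier in it is an illustrative stand-in (plain malloc/free, no locking), not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for struct cfctrl_request_info. */
struct req {
	int cmd;
	int channel_id;
	unsigned int sequence_no;
	struct req *next;
};

struct reqq {
	struct req *first;
	unsigned int seq;
};

/* Append at the tail, stamping a fresh sequence number
 * (cfctrl_insert_req does this under info_list_lock). */
static void reqq_insert(struct reqq *q, struct req *r)
{
	struct req **pp = &q->first;

	r->next = NULL;
	r->sequence_no = ++q->seq;
	while (*pp)
		pp = &(*pp)->next;
	*pp = r;
}

/* Unlink the first entry matching cmd/channel_id; warn when the
 * match was not at the head, like the "not received in order"
 * branch of cfctrl_remove_req. */
static struct req *reqq_remove(struct reqq *q, int cmd, int channel_id)
{
	struct req **pp;

	for (pp = &q->first; *pp; pp = &(*pp)->next) {
		struct req *r = *pp;

		if (r->cmd == cmd && r->channel_id == channel_id) {
			if (r != q->first)
				fprintf(stderr, "response out of order\n");
			*pp = r->next;
			return r;
		}
	}
	return NULL;
}

int main(void)
{
	struct reqq q = { NULL, 0 };
	struct req *a = calloc(1, sizeof(*a));
	struct req *b = calloc(1, sizeof(*b));

	a->cmd = 1; a->channel_id = 3;
	b->cmd = 1; b->channel_id = 4;
	reqq_insert(&q, a);
	reqq_insert(&q, b);
	free(reqq_remove(&q, 1, 4));	/* not at head: warns */
	free(reqq_remove(&q, 1, 3));
	return 0;
}

The pointer-to-pointer walk also removes the head-versus-middle special case that the kernel version spells out twice.
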
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
new file mode 100644
index 000000000000..ab6b6dc34cf8
--- /dev/null
+++ b/net/caif/cfdbgl.c
@@ -0,0 +1,40 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/slab.h>
9#include <net/caif/caif_layer.h>
10#include <net/caif/cfsrvl.h>
11#include <net/caif/cfpkt.h>
12
13static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
14static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
15
16struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
17{
18 struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
19 if (!dbg) {
20 pr_warning("CAIF: %s(): Out of memory\n", __func__);
21 return NULL;
22 }
23 caif_assert(offsetof(struct cfsrvl, layer) == 0);
24 memset(dbg, 0, sizeof(struct cfsrvl));
25 cfsrvl_init(dbg, channel_id, dev_info);
26 dbg->layer.receive = cfdbgl_receive;
27 dbg->layer.transmit = cfdbgl_transmit;
28 snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id);
29 return &dbg->layer;
30}
31
32static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
33{
34 return layr->up->receive(layr->up, pkt);
35}
36
37static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
38{
39 return layr->dn->transmit(layr->dn, pkt);
40}
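
cfdbgl, like the other service layers in this patch, allocates a struct cfsrvl but returns a pointer to its embedded struct cflayer; the caif_assert(offsetof(struct cfsrvl, layer) == 0) is what makes the plain cast back legal. A self-contained sketch of that first-member embedding follows (hypothetical names; the kernel's container_of would lift the first-member restriction, and cffrml.c below indeed uses it).

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct layer {			/* generic part */
	int id;
};

struct service {
	struct layer layer;	/* must stay the first member */
	int open;
};

/* Valid only while 'layer' is the first member of 'service'. */
#define container_obj(l) ((struct service *)(l))

int main(void)
{
	struct service s = { { 42 }, 1 };
	struct layer *l = &s.layer;

	/* The guard the CAIF code expresses as
	 * caif_assert(offsetof(struct cfsrvl, layer) == 0). */
	assert(offsetof(struct service, layer) == 0);
	printf("id=%d open=%d\n", l->id, container_obj(l)->open);
	return 0;
}
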
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
new file mode 100644
index 000000000000..53194840ecb6
--- /dev/null
+++ b/net/caif/cfdgml.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9#include <linux/slab.h>
10#include <net/caif/caif_layer.h>
11#include <net/caif/cfsrvl.h>
12#include <net/caif/cfpkt.h>
13
14#define container_obj(layr) ((struct cfsrvl *) layr)
15
16#define DGM_CMD_BIT 0x80
17#define DGM_FLOW_OFF 0x81
18#define DGM_FLOW_ON 0x80
19#define DGM_CTRL_PKT_SIZE 1
20
21static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
22static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
23
24struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
25{
26 struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
27 if (!dgm) {
28 pr_warning("CAIF: %s(): Out of memory\n", __func__);
29 return NULL;
30 }
31 caif_assert(offsetof(struct cfsrvl, layer) == 0);
32 memset(dgm, 0, sizeof(struct cfsrvl));
33 cfsrvl_init(dgm, channel_id, dev_info);
34 dgm->layer.receive = cfdgml_receive;
35 dgm->layer.transmit = cfdgml_transmit;
36 snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
37 dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0';
38 return &dgm->layer;
39}
40
41static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
42{
43 u8 cmd = -1;
44 u8 dgmhdr[3];
45 int ret;
46 caif_assert(layr->up != NULL);
47 caif_assert(layr->receive != NULL);
48 caif_assert(layr->ctrlcmd != NULL);
49
50 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
51 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
52 cfpkt_destroy(pkt);
53 return -EPROTO;
54 }
55
56 if ((cmd & DGM_CMD_BIT) == 0) {
57 if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
58 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
59 cfpkt_destroy(pkt);
60 return -EPROTO;
61 }
62 ret = layr->up->receive(layr->up, pkt);
63 return ret;
64 }
65
66 switch (cmd) {
67 case DGM_FLOW_OFF: /* FLOW OFF */
68 layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
69 cfpkt_destroy(pkt);
70 return 0;
71 case DGM_FLOW_ON: /* FLOW ON */
72 layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
73 cfpkt_destroy(pkt);
74 return 0;
75 default:
76 cfpkt_destroy(pkt);
77 pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n",
78 __func__, cmd, cmd);
79 return -EPROTO;
80 }
81}
82
83static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
84{
85 u32 zero = 0;
86 struct caif_payload_info *info;
87 struct cfsrvl *service = container_obj(layr);
88 int ret;
89 if (!cfsrvl_ready(service, &ret))
90 return ret;
91
92 cfpkt_add_head(pkt, &zero, 4);
93
94 /* Add info for MUX-layer to route the packet out. */
95 info = cfpkt_info(pkt);
96 info->channel_id = service->layer.id;
97 /* To optimize alignment, we add up the size of CAIF header
98 * before payload.
99 */
100 info->hdr_len = 4;
101 info->dev_info = &service->dev_info;
102 ret = layr->dn->transmit(layr->dn, pkt);
103 if (ret < 0) {
104 u32 tmp32;
105 cfpkt_extr_head(pkt, &tmp32, 4);
106 }
107 return ret;
108}
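
The datagram layer above demultiplexes on its first byte: if the command bit 0x80 is clear, the byte opens a 4-byte header in front of payload; otherwise it is an in-band flow command. A user-space sketch of the same dispatch, with illustrative names and flat buffers in place of cfpkt:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define DGM_CMD_BIT  0x80
#define DGM_FLOW_OFF 0x81
#define DGM_FLOW_ON  0x80

/* Returns 1 for payload (4-byte header present), 0 for a handled
 * flow command, -1 for anything malformed or unknown. */
static int dgm_dispatch(const uint8_t *buf, size_t len, int *flow_on)
{
	if (len < 1)
		return -1;
	if ((buf[0] & DGM_CMD_BIT) == 0)
		return len >= 4 ? 1 : -1;	/* cmd byte + 3 more */
	switch (buf[0]) {
	case DGM_FLOW_OFF:
		*flow_on = 0;
		return 0;
	case DGM_FLOW_ON:
		*flow_on = 1;
		return 0;
	default:
		return -1;
	}
}

int main(void)
{
	int flow = 1;
	const uint8_t pay[] = { 0x00, 0, 0, 0, 'h', 'i' };
	const uint8_t off[] = { DGM_FLOW_OFF };

	printf("%d\n", dgm_dispatch(pay, sizeof(pay), &flow)); /* 1 */
	printf("%d\n", dgm_dispatch(off, sizeof(off), &flow)); /* 0 */
	printf("flow=%d\n", flow);				/* 0 */
	return 0;
}
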
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
new file mode 100644
index 000000000000..e86a4ca3b217
--- /dev/null
+++ b/net/caif/cffrml.c
@@ -0,0 +1,151 @@
1/*
2 * CAIF Framing Layer.
3 *
4 * Copyright (C) ST-Ericsson AB 2010
5 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
6 * License terms: GNU General Public License (GPL) version 2
7 */
8
9#include <linux/stddef.h>
10#include <linux/spinlock.h>
11#include <linux/slab.h>
12#include <linux/crc-ccitt.h>
13#include <net/caif/caif_layer.h>
14#include <net/caif/cfpkt.h>
15#include <net/caif/cffrml.h>
16
17#define container_obj(layr) container_of(layr, struct cffrml, layer)
18
19struct cffrml {
20 struct cflayer layer;
21 bool dofcs; /* !< FCS active */
22};
23
24static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
25static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt);
26static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
27 int phyid);
28
29static u32 cffrml_rcv_error;
30static u32 cffrml_rcv_checksum_error;
31struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
32{
33 struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
34 if (!this) {
35 pr_warning("CAIF: %s(): Out of memory\n", __func__);
36 return NULL;
37 }
38 caif_assert(offsetof(struct cffrml, layer) == 0);
39
40 memset(this, 0, sizeof(struct cffrml));
41 this->layer.receive = cffrml_receive;
42 this->layer.transmit = cffrml_transmit;
43 this->layer.ctrlcmd = cffrml_ctrlcmd;
44 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid);
45 this->dofcs = use_fcs;
46 this->layer.id = phyid;
47 return (struct cflayer *) this;
48}
49
50void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up)
51{
52 this->up = up;
53}
54
55void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn)
56{
57 this->dn = dn;
58}
59
60static u16 cffrml_checksum(u16 chks, void *buf, u16 len)
61{
62 /* FIXME: FCS should be moved to glue in order to use OS-Specific
63 * solutions
64 */
65 return crc_ccitt(chks, buf, len);
66}
67
68static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
69{
70 u16 tmp;
71 u16 len;
72 u16 hdrchks;
73 u16 pktchks;
74 struct cffrml *this;
75 this = container_obj(layr);
76
77 cfpkt_extr_head(pkt, &tmp, 2);
78 len = le16_to_cpu(tmp);
79
80 /* Subtract for FCS on length if FCS is not used. */
81 if (!this->dofcs)
82 len -= 2;
83
84 if (cfpkt_setlen(pkt, len) < 0) {
85 ++cffrml_rcv_error;
86 pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len);
87 cfpkt_destroy(pkt);
88 return -EPROTO;
89 }
90 /*
91 * Don't do extract if FCS is false, rather do setlen - then we don't
92 * get a cache-miss.
93 */
94 if (this->dofcs) {
95 cfpkt_extr_trail(pkt, &tmp, 2);
96 hdrchks = le16_to_cpu(tmp);
97 pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
98 if (pktchks != hdrchks) {
99 cfpkt_add_trail(pkt, &tmp, 2);
100 ++cffrml_rcv_error;
101 ++cffrml_rcv_checksum_error;
102 pr_info("CAIF: %s(): Frame checksum error "
103 "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks);
104 return -EILSEQ;
105 }
106 }
107 if (cfpkt_erroneous(pkt)) {
108 ++cffrml_rcv_error;
109 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
110 cfpkt_destroy(pkt);
111 return -EPROTO;
112 }
113 return layr->up->receive(layr->up, pkt);
114}
115
116static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
117{
118 u16 tmp;
119 u16 chks;
120 u16 len;
121 int ret;
122 struct cffrml *this = container_obj(layr);
123 if (this->dofcs) {
124 chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
125 tmp = cpu_to_le16(chks);
126 cfpkt_add_trail(pkt, &tmp, 2);
127 } else {
128 cfpkt_pad_trail(pkt, 2);
129 }
130 len = cfpkt_getlen(pkt);
131 tmp = cpu_to_le16(len);
132 cfpkt_add_head(pkt, &tmp, 2);
133 cfpkt_info(pkt)->hdr_len += 2;
134 if (cfpkt_erroneous(pkt)) {
135 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
136 return -EPROTO;
137 }
138 ret = layr->dn->transmit(layr->dn, pkt);
139 if (ret < 0) {
140 /* Remove header on faulty packet. */
141 cfpkt_extr_head(pkt, &tmp, 2);
142 }
143 return ret;
144}
145
146static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
147 int phyid)
148{
149 if (layr->up->ctrlcmd)
150 layr->up->ctrlcmd(layr->up, ctrl, layr->id);
151}
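
cffrml frames every packet with a 16-bit little-endian length up front and, when FCS is enabled, a CRC-CCITT trailer seeded with 0xffff; the length field counts payload plus the two trailer bytes. The sketch below models encode-then-verify in user space. The bitwise loop matches the polynomial behind the kernel's crc_ccitt() (0x8408, bit-reflected); all other names are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bit-reflected CRC-CCITT, polynomial 0x8408, as in lib/crc-ccitt.c. */
static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
{
	size_t i;
	int b;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (b = 0; b < 8; b++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	}
	return crc;
}

/* Frame layout: [len lo][len hi][payload][fcs lo][fcs hi],
 * where len counts payload plus the 2 FCS bytes. */
static size_t frame_encode(uint8_t *out, const uint8_t *pay, size_t plen)
{
	uint16_t len = (uint16_t)(plen + 2);
	uint16_t fcs = crc_ccitt(0xffff, pay, plen);

	out[0] = len & 0xff;
	out[1] = len >> 8;
	memcpy(out + 2, pay, plen);
	out[2 + plen] = fcs & 0xff;
	out[3 + plen] = fcs >> 8;
	return plen + 4;
}

static int frame_verify(const uint8_t *frm, size_t flen)
{
	uint16_t len, fcs, want;

	if (flen < 4)
		return -1;
	len = frm[0] | (frm[1] << 8);
	if ((size_t)len + 2 != flen || len < 2)
		return -1;
	want = frm[2 + len - 2] | (frm[2 + len - 1] << 8);
	fcs = crc_ccitt(0xffff, frm + 2, len - 2);
	return fcs == want ? 0 : -1;
}

int main(void)
{
	uint8_t frm[64];
	const uint8_t pay[] = "caif";
	size_t n = frame_encode(frm, pay, sizeof(pay));

	printf("verify: %d\n", frame_verify(frm, n)); /* 0 */
	frm[3] ^= 1;                                  /* corrupt payload */
	printf("verify: %d\n", frame_verify(frm, n)); /* -1 */
	return 0;
}
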
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
new file mode 100644
index 000000000000..6fb9f9e96cf8
--- /dev/null
+++ b/net/caif/cfmuxl.c
@@ -0,0 +1,246 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6#include <linux/stddef.h>
7#include <linux/spinlock.h>
8#include <linux/slab.h>
9#include <net/caif/cfpkt.h>
10#include <net/caif/cfmuxl.h>
11#include <net/caif/cfsrvl.h>
12#include <net/caif/cffrml.h>
13
14#define container_obj(layr) container_of(layr, struct cfmuxl, layer)
15
16#define CAIF_CTRL_CHANNEL 0
17#define UP_CACHE_SIZE 8
18#define DN_CACHE_SIZE 8
19
20struct cfmuxl {
21 struct cflayer layer;
22 struct list_head srvl_list;
23 struct list_head frml_list;
24 struct cflayer *up_cache[UP_CACHE_SIZE];
25 struct cflayer *dn_cache[DN_CACHE_SIZE];
26 /*
27 * Set when inserting or removing downwards layers.
28 */
29 spinlock_t transmit_lock;
30
31 /*
32 * Set when inserting or removing upwards layers.
33 */
34 spinlock_t receive_lock;
35
36};
37
38static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
39static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
40static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
41 int phyid);
42static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
43
44struct cflayer *cfmuxl_create(void)
45{
46 struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC);
47 if (!this)
48 return NULL;
49 memset(this, 0, sizeof(*this));
50 this->layer.receive = cfmuxl_receive;
51 this->layer.transmit = cfmuxl_transmit;
52 this->layer.ctrlcmd = cfmuxl_ctrlcmd;
53 INIT_LIST_HEAD(&this->srvl_list);
54 INIT_LIST_HEAD(&this->frml_list);
55 spin_lock_init(&this->transmit_lock);
56 spin_lock_init(&this->receive_lock);
57 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux");
58 return &this->layer;
59}
60
61int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
62{
63 struct cfmuxl *muxl = container_obj(layr);
64 spin_lock(&muxl->receive_lock);
65 list_add(&up->node, &muxl->srvl_list);
66 spin_unlock(&muxl->receive_lock);
67 return 0;
68}
69
70bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid)
71{
72 struct list_head *node;
73 struct cflayer *layer;
74 struct cfmuxl *muxl = container_obj(layr);
75 bool match = false;
76 spin_lock(&muxl->receive_lock);
77
78 list_for_each(node, &muxl->srvl_list) {
79 layer = list_entry(node, struct cflayer, node);
80 if (cfsrvl_phyid_match(layer, phyid)) {
81 match = true;
82 break;
83 }
84
85 }
86 spin_unlock(&muxl->receive_lock);
87 return match;
88}
89
90u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id)
91{
92 struct cflayer *up;
93 int phyid;
94 struct cfmuxl *muxl = container_obj(layr);
95 spin_lock(&muxl->receive_lock);
96 up = get_up(muxl, channel_id);
97 if (up != NULL)
98 phyid = cfsrvl_getphyid(up);
99 else
100 phyid = 0;
101 spin_unlock(&muxl->receive_lock);
102 return phyid;
103}
104
105int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
106{
107 struct cfmuxl *muxl = (struct cfmuxl *) layr;
108 spin_lock(&muxl->transmit_lock);
109 list_add(&dn->node, &muxl->frml_list);
110 spin_unlock(&muxl->transmit_lock);
111 return 0;
112}
113
114static struct cflayer *get_from_id(struct list_head *list, u16 id)
115{
116 struct list_head *node;
117 struct cflayer *layer;
118 list_for_each(node, list) {
119 layer = list_entry(node, struct cflayer, node);
120 if (layer->id == id)
121 return layer;
122 }
123 return NULL;
124}
125
126struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
127{
128 struct cfmuxl *muxl = container_obj(layr);
129 struct cflayer *dn;
130 spin_lock(&muxl->transmit_lock);
131 memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache));
132 dn = get_from_id(&muxl->frml_list, phyid);
133 if (dn == NULL) {
134 spin_unlock(&muxl->transmit_lock);
135 return NULL;
136 }
137 list_del(&dn->node);
138 caif_assert(dn != NULL);
139 spin_unlock(&muxl->transmit_lock);
140 return dn;
141}
142
143/* Invariant: lock is taken */
144static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
145{
146 struct cflayer *up;
147 int idx = id % UP_CACHE_SIZE;
148 up = muxl->up_cache[idx];
149 if (up == NULL || up->id != id) {
150 up = get_from_id(&muxl->srvl_list, id);
151 muxl->up_cache[idx] = up;
152 }
153 return up;
154}
155
156/* Invariant: lock is taken */
157static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
158{
159 struct cflayer *dn;
160 int idx = dev_info->id % DN_CACHE_SIZE;
161 dn = muxl->dn_cache[idx];
162 if (dn == NULL || dn->id != dev_info->id) {
163 dn = get_from_id(&muxl->frml_list, dev_info->id);
164 muxl->dn_cache[idx] = dn;
165 }
166 return dn;
167}
168
169struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
170{
171 struct cflayer *up;
172 struct cfmuxl *muxl = container_obj(layr);
173 spin_lock(&muxl->receive_lock);
174 up = get_up(muxl, id);
175 memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
176 list_del(&up->node);
177 spin_unlock(&muxl->receive_lock);
178 return up;
179}
180
181static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
182{
183 int ret;
184 struct cfmuxl *muxl = container_obj(layr);
185 u8 id;
186 struct cflayer *up;
187 if (cfpkt_extr_head(pkt, &id, 1) < 0) {
188 pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__);
189 cfpkt_destroy(pkt);
190 return -EPROTO;
191 }
192
193 spin_lock(&muxl->receive_lock);
194 up = get_up(muxl, id);
195 spin_unlock(&muxl->receive_lock);
196 if (up == NULL) {
197 pr_info("CAIF: %s(): Received data on unknown link ID = %d "
198 "(0x%x), up == NULL\n", __func__, id, id);
199 cfpkt_destroy(pkt);
200 /*
201 * Don't return ERROR, since modem misbehaves and sends out
202 * flow on before linksetup response.
203 */
204 return 0;
205 }
206
207 ret = up->receive(up, pkt);
208 return ret;
209}
210
211static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
212{
213 int ret;
214 struct cfmuxl *muxl = container_obj(layr);
215 u8 linkid;
216 struct cflayer *dn;
217 struct caif_payload_info *info = cfpkt_info(pkt);
218 dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
219 if (dn == NULL) {
220 pr_warning("CAIF: %s(): Send data on unknown phy "
221 "ID = %d (0x%x)\n",
222 __func__, info->dev_info->id, info->dev_info->id);
223 return -ENOTCONN;
224 }
225 info->hdr_len += 1;
226 linkid = info->channel_id;
227 cfpkt_add_head(pkt, &linkid, 1);
228 ret = dn->transmit(dn, pkt);
229 /* Remove MUX protocol header upon error. */
230 if (ret < 0)
231 cfpkt_extr_head(pkt, &linkid, 1);
232 return ret;
233}
234
235static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
236 int phyid)
237{
238 struct cfmuxl *muxl = container_obj(layr);
239 struct list_head *node;
240 struct cflayer *layer;
241 list_for_each(node, &muxl->srvl_list) {
242 layer = list_entry(node, struct cflayer, node);
243 if (cfsrvl_phyid_match(layer, phyid))
244 layer->ctrlcmd(layer, ctrl, phyid);
245 }
246}
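
The mux above fronts its linear list walk with a direct-mapped cache indexed by id % UP_CACHE_SIZE, and on removal it simply wipes the whole cache rather than invalidating one slot. A compact user-space model of that lookup pattern (illustrative names, no locking):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CACHE_SIZE 8

struct layer {
	uint16_t id;
	struct layer *next;	/* illustrative list link */
};

struct mux {
	struct layer *list;
	struct layer *cache[CACHE_SIZE];
};

/* Direct-mapped cache in front of a linear search, mirroring get_up(). */
static struct layer *mux_get(struct mux *m, uint16_t id)
{
	int idx = id % CACHE_SIZE;
	struct layer *l = m->cache[idx];

	if (l && l->id == id)
		return l;		/* cache hit */
	for (l = m->list; l; l = l->next)
		if (l->id == id)
			break;
	m->cache[idx] = l;		/* may cache NULL on a miss,
					 * as the kernel version does */
	return l;
}

/* On removal the kernel version just clears the whole cache. */
static void mux_remove(struct mux *m, struct layer *victim)
{
	struct layer **pp;

	memset(m->cache, 0, sizeof(m->cache));
	for (pp = &m->list; *pp; pp = &(*pp)->next)
		if (*pp == victim) {
			*pp = victim->next;
			break;
		}
}

int main(void)
{
	struct layer a = { 3, NULL }, b = { 11, &a };	/* 11 % 8 == 3 % 8 */
	struct mux m = { &b, { NULL } };

	printf("%p %p\n", (void *)mux_get(&m, 3), (void *)mux_get(&m, 11));
	mux_remove(&m, &a);
	printf("%p\n", (void *)mux_get(&m, 3));	/* (nil) after removal */
	return 0;
}

Wiping the full cache is crude but safe: the next lookup for any id repopulates its slot from the list, so a stale pointer can never be returned.
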
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
new file mode 100644
index 000000000000..83fff2ff6658
--- /dev/null
+++ b/net/caif/cfpkt_skbuff.c
@@ -0,0 +1,571 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/string.h>
8#include <linux/skbuff.h>
9#include <linux/hardirq.h>
10#include <net/caif/cfpkt.h>
11
12#define PKT_PREFIX CAIF_NEEDED_HEADROOM
13#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
14#define PKT_LEN_WHEN_EXTENDING 128
15#define PKT_ERROR(pkt, errmsg) do { \
16 cfpkt_priv(pkt)->erronous = true; \
17 skb_reset_tail_pointer(&pkt->skb); \
18 pr_warning("CAIF: " errmsg);\
19 } while (0)
20
21struct cfpktq {
22 struct sk_buff_head head;
23 atomic_t count;
24 /* Lock protects count updates */
25 spinlock_t lock;
26};
27
28/*
29 * net/caif/ is generic and does not
30 * understand SKB, so we do this typecast
31 */
32struct cfpkt {
33 struct sk_buff skb;
34};
35
36/* Private data inside SKB */
37struct cfpkt_priv_data {
38 struct dev_info dev_info;
39 bool erronous;
40};
41
42inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
43{
44 return (struct cfpkt_priv_data *) pkt->skb.cb;
45}
46
47inline bool is_erronous(struct cfpkt *pkt)
48{
49 return cfpkt_priv(pkt)->erronous;
50}
51
52inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
53{
54 return &pkt->skb;
55}
56
57inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
58{
59 return (struct cfpkt *) skb;
60}
61
62
63struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
64{
65 struct cfpkt *pkt = skb_to_pkt(nativepkt);
66 cfpkt_priv(pkt)->erronous = false;
67 return pkt;
68}
69EXPORT_SYMBOL(cfpkt_fromnative);
70
71void *cfpkt_tonative(struct cfpkt *pkt)
72{
73 return (void *) pkt;
74}
75EXPORT_SYMBOL(cfpkt_tonative);
76
77static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
78{
79 struct sk_buff *skb;
80
81 if (likely(in_interrupt()))
82 skb = alloc_skb(len + pfx, GFP_ATOMIC);
83 else
84 skb = alloc_skb(len + pfx, GFP_KERNEL);
85
86 if (unlikely(skb == NULL))
87 return NULL;
88
89 skb_reserve(skb, pfx);
90 return skb_to_pkt(skb);
91}
92
93inline struct cfpkt *cfpkt_create(u16 len)
94{
95 return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
96}
97EXPORT_SYMBOL(cfpkt_create);
98
99void cfpkt_destroy(struct cfpkt *pkt)
100{
101 struct sk_buff *skb = pkt_to_skb(pkt);
102 kfree_skb(skb);
103}
104EXPORT_SYMBOL(cfpkt_destroy);
105
106inline bool cfpkt_more(struct cfpkt *pkt)
107{
108 struct sk_buff *skb = pkt_to_skb(pkt);
109 return skb->len > 0;
110}
111EXPORT_SYMBOL(cfpkt_more);
112
113int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
114{
115 struct sk_buff *skb = pkt_to_skb(pkt);
116 if (skb_headlen(skb) >= len) {
117 memcpy(data, skb->data, len);
118 return 0;
119 }
120 return !cfpkt_extr_head(pkt, data, len) &&
121 !cfpkt_add_head(pkt, data, len);
122}
123EXPORT_SYMBOL(cfpkt_peek_head);
124
125int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
126{
127 struct sk_buff *skb = pkt_to_skb(pkt);
128 u8 *from;
129 if (unlikely(is_erronous(pkt)))
130 return -EPROTO;
131
132 if (unlikely(len > skb->len)) {
133 PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n");
134 return -EPROTO;
135 }
136
137 if (unlikely(len > skb_headlen(skb))) {
138 if (unlikely(skb_linearize(skb) != 0)) {
139 PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n");
140 return -EPROTO;
141 }
142 }
143 from = skb_pull(skb, len);
144 from -= len;
145 memcpy(data, from, len);
146 return 0;
147}
148EXPORT_SYMBOL(cfpkt_extr_head);
149
150int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
151{
152 struct sk_buff *skb = pkt_to_skb(pkt);
153 u8 *data = dta;
154 u8 *from;
155 if (unlikely(is_erronous(pkt)))
156 return -EPROTO;
157
158 if (unlikely(skb_linearize(skb) != 0)) {
159 PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n");
160 return -EPROTO;
161 }
162 if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
163 PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n");
164 return -EPROTO;
165 }
166 from = skb_tail_pointer(skb) - len;
167 skb_trim(skb, skb->len - len);
168 memcpy(data, from, len);
169 return 0;
170}
171EXPORT_SYMBOL(cfpkt_extr_trail);
172
173int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
174{
175 return cfpkt_add_body(pkt, NULL, len);
176}
177EXPORT_SYMBOL(cfpkt_pad_trail);
178
179int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
180{
181 struct sk_buff *skb = pkt_to_skb(pkt);
182 struct sk_buff *lastskb;
183 u8 *to;
184 u16 addlen = 0;
185
186
187 if (unlikely(is_erronous(pkt)))
188 return -EPROTO;
189
190 lastskb = skb;
191
192 /* Check whether we need to add space at the tail */
193 if (unlikely(skb_tailroom(skb) < len)) {
194 if (likely(len < PKT_LEN_WHEN_EXTENDING))
195 addlen = PKT_LEN_WHEN_EXTENDING;
196 else
197 addlen = len;
198 }
199
200 /* Check whether we need to change the SKB before writing to the tail */
201 if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) {
202
203 /* Make sure data is writable */
204 if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
205 PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n");
206 return -EPROTO;
207 }
208 /*
209 * Is the SKB non-linear after skb_cow_data()? If so, we are
210 * going to add data to the last SKB, so we need to adjust
211 * lengths of the top SKB.
212 */
213 if (lastskb != skb) {
214 pr_warning("CAIF: %s(): Packet is non-linear\n",
215 __func__);
216 skb->len += len;
217 skb->data_len += len;
218 }
219 }
220
221 /* All set to put the last SKB and optionally write data there. */
222 to = skb_put(lastskb, len);
223 if (likely(data))
224 memcpy(to, data, len);
225 return 0;
226}
227EXPORT_SYMBOL(cfpkt_add_body);
228
229inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data)
230{
231 return cfpkt_add_body(pkt, &data, 1);
232}
233EXPORT_SYMBOL(cfpkt_addbdy);
234
235int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
236{
237 struct sk_buff *skb = pkt_to_skb(pkt);
238 struct sk_buff *lastskb;
239 u8 *to;
240 const u8 *data = data2;
241 if (unlikely(is_erronous(pkt)))
242 return -EPROTO;
243 if (unlikely(skb_headroom(skb) < len)) {
244 PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n");
245 return -EPROTO;
246 }
247
248 /* Make sure data is writable */
249 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
250 PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
251 return -EPROTO;
252 }
253
254 to = skb_push(skb, len);
255 memcpy(to, data, len);
256 return 0;
257}
258EXPORT_SYMBOL(cfpkt_add_head);
259
260inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
261{
262 return cfpkt_add_body(pkt, data, len);
263}
264EXPORT_SYMBOL(cfpkt_add_trail);
265
266inline u16 cfpkt_getlen(struct cfpkt *pkt)
267{
268 struct sk_buff *skb = pkt_to_skb(pkt);
269 return skb->len;
270}
271EXPORT_SYMBOL(cfpkt_getlen);
272
273inline u16 cfpkt_iterate(struct cfpkt *pkt,
274 u16 (*iter_func)(u16, void *, u16),
275 u16 data)
276{
277 /*
278 * Don't care about the performance hit of linearizing,
279 * Checksum should not be used on high-speed interfaces anyway.
280 */
281 if (unlikely(is_erronous(pkt)))
282 return -EPROTO;
283 if (unlikely(skb_linearize(&pkt->skb) != 0)) {
284 PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n");
285 return -EPROTO;
286 }
287 return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
288}
289EXPORT_SYMBOL(cfpkt_iterate);
290
291int cfpkt_setlen(struct cfpkt *pkt, u16 len)
292{
293 struct sk_buff *skb = pkt_to_skb(pkt);
294
295
296 if (unlikely(is_erronous(pkt)))
297 return -EPROTO;
298
299 if (likely(len <= skb->len)) {
300 if (unlikely(skb->data_len))
301 ___pskb_trim(skb, len);
302 else
303 skb_trim(skb, len);
304
305 return cfpkt_getlen(pkt);
306 }
307
308 /* Need to expand SKB */
309 if (unlikely(cfpkt_pad_trail(pkt, len - skb->len) < 0))
310 PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n");
311
312 return cfpkt_getlen(pkt);
313}
314EXPORT_SYMBOL(cfpkt_setlen);
315
316struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
317{
318 struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
319 if (pkt != NULL && data != NULL)
320 cfpkt_add_body(pkt, data, len);
321 return pkt;
322}
323EXPORT_SYMBOL(cfpkt_create_uplink);
324
325struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
326 struct cfpkt *addpkt,
327 u16 expectlen)
328{
329 struct sk_buff *dst = pkt_to_skb(dstpkt);
330 struct sk_buff *add = pkt_to_skb(addpkt);
331 u16 addlen = skb_headlen(add);
332 u16 neededtailspace;
333 struct sk_buff *tmp;
334 u16 dstlen;
335 u16 createlen;
336 if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
337 cfpkt_destroy(addpkt);
338 return dstpkt;
339 }
340 if (expectlen > addlen)
341 neededtailspace = expectlen;
342 else
343 neededtailspace = addlen;
344
345 if (dst->tail + neededtailspace > dst->end) {
346 /* Create a duplicate of 'dst' with more tail space */
347 dstlen = skb_headlen(dst);
348 createlen = dstlen + neededtailspace;
349 tmp = pkt_to_skb(
350 cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
351 if (!tmp)
352 return NULL;
353 skb_set_tail_pointer(tmp, dstlen);
354 tmp->len = dstlen;
355 memcpy(tmp->data, dst->data, dstlen);
356 cfpkt_destroy(dstpkt);
357 dst = tmp;
358 }
359 memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add));
360 cfpkt_destroy(addpkt);
361 dst->tail += addlen;
362 dst->len += addlen;
363 return skb_to_pkt(dst);
364}
365EXPORT_SYMBOL(cfpkt_append);
366
367struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
368{
369 struct sk_buff *skb2;
370 struct sk_buff *skb = pkt_to_skb(pkt);
371 u8 *split = skb->data + pos;
372 u16 len2nd = skb_tail_pointer(skb) - split;
373
374 if (unlikely(is_erronous(pkt)))
375 return NULL;
376
377 if (skb->data + pos > skb_tail_pointer(skb)) {
378 PKT_ERROR(pkt,
379 "cfpkt_split: trying to split beyond end of packet\n");
380 return NULL;
381 }
382
383 /* Create a new packet for the second part of the data */
384 skb2 = pkt_to_skb(
385 cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
386 PKT_PREFIX));
387
388 if (skb2 == NULL)
389 return NULL;
390
391 /* Reduce the length of the original packet */
392 skb_set_tail_pointer(skb, pos);
393 skb->len = pos;
394
395 memcpy(skb2->data, split, len2nd);
396 skb2->tail += len2nd;
397 skb2->len += len2nd;
398 return skb_to_pkt(skb2);
399}
400EXPORT_SYMBOL(cfpkt_split);
401
402char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen)
403{
404 struct sk_buff *skb = pkt_to_skb(pkt);
405 char *p = buf;
406 int i;
407
408 /*
409 * Sanity check buffer length, it needs to be at least as large as
410 * the header info: ~=50+ bytes
411 */
412 if (buflen < 50)
413 return NULL;
414
415 snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [",
416 is_erronous(pkt) ? "ERRONOUS-SKB" :
417 (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
418 skb,
419 (long) skb->len,
420 (long) (skb_tail_pointer(skb) - skb->data),
421 (long) skb->data_len,
422 (long) (skb->data - skb->head),
423 (long) (skb_tail_pointer(skb) - skb->head));
424 p = buf + strlen(buf);
425
426 for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) {
427 if (p > buf + buflen - 10) {
428 sprintf(p, "...");
429 p = buf + strlen(buf);
430 break;
431 }
432 sprintf(p, "%02x,", skb->data[i]);
433 p = buf + strlen(buf);
434 }
435 sprintf(p, "]\n");
436 return buf;
437}
438EXPORT_SYMBOL(cfpkt_log_pkt);
439
440int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
441{
442 struct sk_buff *skb = pkt_to_skb(pkt);
443 struct sk_buff *lastskb;
444
445 caif_assert(buf != NULL);
446 if (unlikely(is_erronous(pkt)))
447 return -EPROTO;
448 /* Make sure SKB is writable */
449 if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
450 PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n");
451 return -EPROTO;
452 }
453
454 if (unlikely(skb_linearize(skb) != 0)) {
455 PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n");
456 return -EPROTO;
457 }
458
459 if (unlikely(skb_tailroom(skb) < buflen)) {
460 PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n");
461 return -EPROTO;
462 }
463
464 *buf = skb_put(skb, buflen);
465 return 1;
466}
467EXPORT_SYMBOL(cfpkt_raw_append);
468
469int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
470{
471 struct sk_buff *skb = pkt_to_skb(pkt);
472
473 caif_assert(buf != NULL);
474 if (unlikely(is_erronous(pkt)))
475 return -EPROTO;
476
477 if (unlikely(buflen > skb->len)) {
478 PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large "
479 "- failed\n");
480 return -EPROTO;
481 }
482
483 if (unlikely(buflen > skb_headlen(skb))) {
484 if (unlikely(skb_linearize(skb) != 0)) {
485 PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n");
486 return -EPROTO;
487 }
488 }
489
490 *buf = skb->data;
491 skb_pull(skb, buflen);
492
493 return 1;
494}
495EXPORT_SYMBOL(cfpkt_raw_extract);
496
497inline bool cfpkt_erroneous(struct cfpkt *pkt)
498{
499 return cfpkt_priv(pkt)->erronous;
500}
501EXPORT_SYMBOL(cfpkt_erroneous);
502
503struct cfpktq *cfpktq_create(void)
504{
505 struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC);
506 if (!q)
507 return NULL;
508 skb_queue_head_init(&q->head);
509 atomic_set(&q->count, 0);
510 spin_lock_init(&q->lock);
511 return q;
512}
513EXPORT_SYMBOL(cfpktq_create);
514
515void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
516{
517 atomic_inc(&pktq->count);
518 spin_lock(&pktq->lock);
519 skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
520 spin_unlock(&pktq->lock);
521
522}
523EXPORT_SYMBOL(cfpkt_queue);
524
525struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
526{
527 struct cfpkt *tmp;
528 spin_lock(&pktq->lock);
529 tmp = skb_to_pkt(skb_peek(&pktq->head));
530 spin_unlock(&pktq->lock);
531 return tmp;
532}
533EXPORT_SYMBOL(cfpkt_qpeek);
534
535struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
536{
537 struct cfpkt *pkt;
538 spin_lock(&pktq->lock);
539 pkt = skb_to_pkt(skb_dequeue(&pktq->head));
540 if (pkt) {
541 atomic_dec(&pktq->count);
542 caif_assert(atomic_read(&pktq->count) >= 0);
543 }
544 spin_unlock(&pktq->lock);
545 return pkt;
546}
547EXPORT_SYMBOL(cfpkt_dequeue);
548
549int cfpkt_qcount(struct cfpktq *pktq)
550{
551 return atomic_read(&pktq->count);
552}
553EXPORT_SYMBOL(cfpkt_qcount);
554
555struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
556{
557 struct cfpkt *clone;
558 clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC));
559 /* Free original packet. */
560 cfpkt_destroy(pkt);
561 if (!clone)
562 return NULL;
563 return clone;
564}
565EXPORT_SYMBOL(cfpkt_clone_release);
566
567struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
568{
569 return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
570}
571EXPORT_SYMBOL(cfpkt_info);
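
cfpkt_skbuff.c leans on the usual sk_buff headroom discipline: cfpkt_create() reserves PKT_PREFIX bytes up front so each layer on the way down can push its header in place instead of reallocating. A user-space model of the reserve/push/put/pull arithmetic (fixed-size buffer, illustrative names, asserts in place of the erronous-flag error path):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct pkt {
	uint8_t buf[256];
	size_t data;		/* offset of first payload byte */
	size_t tail;		/* offset one past last payload byte */
};

/* Reserve headroom so later push() calls cannot underflow. */
static void pkt_init(struct pkt *p, size_t headroom)
{
	p->data = p->tail = headroom;
}

/* Prepend a header, like skb_push/cfpkt_add_head. */
static void pkt_push(struct pkt *p, const void *hdr, size_t n)
{
	assert(p->data >= n);		/* enough headroom */
	p->data -= n;
	memcpy(p->buf + p->data, hdr, n);
}

/* Append payload, like skb_put/cfpkt_add_body. */
static void pkt_put(struct pkt *p, const void *body, size_t n)
{
	assert(p->tail + n <= sizeof(p->buf));
	memcpy(p->buf + p->tail, body, n);
	p->tail += n;
}

/* Consume from the front, like skb_pull/cfpkt_extr_head. */
static void pkt_pull(struct pkt *p, void *out, size_t n)
{
	assert(p->tail - p->data >= n);
	memcpy(out, p->buf + p->data, n);
	p->data += n;
}

int main(void)
{
	struct pkt p;
	uint8_t chan = 5, got;

	pkt_init(&p, 16);		/* cf. PKT_PREFIX */
	pkt_put(&p, "payload", 7);
	pkt_push(&p, &chan, 1);		/* mux prepends channel id */
	pkt_pull(&p, &got, 1);
	printf("channel %u, len %zu\n", got, p.tail - p.data);
	return 0;
}
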
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
new file mode 100644
index 000000000000..cd2830fec935
--- /dev/null
+++ b/net/caif/cfrfml.c
@@ -0,0 +1,108 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9#include <linux/slab.h>
10#include <net/caif/caif_layer.h>
11#include <net/caif/cfsrvl.h>
12#include <net/caif/cfpkt.h>
13
14#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
15
16#define RFM_SEGMENTATION_BIT 0x01
17#define RFM_PAYLOAD 0x00
18#define RFM_CMD_BIT 0x80
19#define RFM_FLOW_OFF 0x81
20#define RFM_FLOW_ON 0x80
21#define RFM_SET_PIN 0x82
22#define RFM_CTRL_PKT_SIZE 1
23
24static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
25static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
26static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl);
27
28struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info)
29{
30 struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
31 if (!rfm) {
32 pr_warning("CAIF: %s(): Out of memory\n", __func__);
33 return NULL;
34 }
35 caif_assert(offsetof(struct cfsrvl, layer) == 0);
36 memset(rfm, 0, sizeof(struct cfsrvl));
37 cfsrvl_init(rfm, channel_id, dev_info);
38 rfm->layer.modemcmd = cfservl_modemcmd;
39 rfm->layer.receive = cfrfml_receive;
40 rfm->layer.transmit = cfrfml_transmit;
41 snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
42 return &rfm->layer;
43}
44
45static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
46{
47 return -EPROTO;
48}
49
50static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
51{
52 u8 tmp;
53 bool segmented;
54 int ret;
55 caif_assert(layr->up != NULL);
56 caif_assert(layr->receive != NULL);
57
58 /*
59 * RFM is taking care of segmentation and stripping of
60 * segmentation bit.
61 */
62 if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
63 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
64 cfpkt_destroy(pkt);
65 return -EPROTO;
66 }
67 segmented = tmp & RFM_SEGMENTATION_BIT;
68 caif_assert(!segmented);
69
70 ret = layr->up->receive(layr->up, pkt);
71 return ret;
72}
73
74static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
75{
76 u8 tmp = 0;
77 int ret;
78 struct cfsrvl *service = container_obj(layr);
79
80 caif_assert(layr->dn != NULL);
81 caif_assert(layr->dn->transmit != NULL);
82
83 if (!cfsrvl_ready(service, &ret))
84 return ret;
85
86 if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
87 pr_err("CAIF: %s():Packet too large - size=%d\n",
88 __func__, cfpkt_getlen(pkt));
89 return -EOVERFLOW;
90 }
91 if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
92 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
93 return -EPROTO;
94 }
95
96 /* Add info for MUX-layer to route the packet out. */
97 cfpkt_info(pkt)->channel_id = service->layer.id;
98 /*
99 * To optimize alignment, we add up the size of CAIF header before
100 * payload.
101 */
102 cfpkt_info(pkt)->hdr_len = 1;
103 cfpkt_info(pkt)->dev_info = &service->dev_info;
104 ret = layr->dn->transmit(layr->dn, pkt);
105 if (ret < 0)
106 cfpkt_extr_head(pkt, &tmp, 1);
107 return ret;
108}
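
One fix above deserves a note: as merged, the size guard in cfrfml_transmit read if (!cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE). Since ! binds tighter than >, that compares 0 or 1 against the limit and never fires; the corrected form matches the check cfutill.c already gets right. A minimal demonstration of the pitfall:

#include <stdio.h>

#define MAX 4096

int main(void)
{
	int len = 9999;

	/* '!' binds tighter than '>', so this is (!len) > MAX,
	 * i.e. 0 > 4096: never true, even for oversize packets.
	 * (Modern compilers flag this with -Wlogical-not-parentheses.) */
	if (!len > MAX)
		printf("unreachable\n");
	if (len > MAX)
		printf("oversize correctly detected\n");
	return 0;
}
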
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
new file mode 100644
index 000000000000..06029ea2da2f
--- /dev/null
+++ b/net/caif/cfserl.c
@@ -0,0 +1,192 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/spinlock.h>
9#include <linux/slab.h>
10#include <net/caif/caif_layer.h>
11#include <net/caif/cfpkt.h>
12#include <net/caif/cfserl.h>
13
14#define container_obj(layr) ((struct cfserl *) layr)
15
16#define CFSERL_STX 0x02
17#define CAIF_MINIMUM_PACKET_SIZE 4
18struct cfserl {
19 struct cflayer layer;
20 struct cfpkt *incomplete_frm;
21 /* Protects parallel processing of incoming packets */
22 spinlock_t sync;
23 bool usestx;
24};
25#define STXLEN(layr) (layr->usestx ? 1 : 0)
26
27static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
28static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
29static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
30 int phyid);
31
32struct cflayer *cfserl_create(int type, int instance, bool use_stx)
33{
34 struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
35 if (!this) {
36 pr_warning("CAIF: %s(): Out of memory\n", __func__);
37 return NULL;
38 }
39 caif_assert(offsetof(struct cfserl, layer) == 0);
40 memset(this, 0, sizeof(struct cfserl));
41 this->layer.receive = cfserl_receive;
42 this->layer.transmit = cfserl_transmit;
43 this->layer.ctrlcmd = cfserl_ctrlcmd;
44 this->layer.type = type;
45 this->usestx = use_stx;
46 spin_lock_init(&this->sync);
47 snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser1");
48 return &this->layer;
49}
50
51static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
52{
53 struct cfserl *layr = container_obj(l);
54 u16 pkt_len;
55 struct cfpkt *pkt = NULL;
56 struct cfpkt *tail_pkt = NULL;
57 u8 tmp8;
58 u16 tmp;
59 u8 stx = CFSERL_STX;
60 int ret;
61 u16 expectlen = 0;
62 caif_assert(newpkt != NULL);
63 spin_lock(&layr->sync);
64
65 if (layr->incomplete_frm != NULL) {
66
67 layr->incomplete_frm =
68 cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
69 pkt = layr->incomplete_frm;
70 } else {
71 pkt = newpkt;
72 }
73 layr->incomplete_frm = NULL;
74
75 do {
76 /* Search for STX at start of pkt if STX is used */
77 if (layr->usestx) {
78 cfpkt_extr_head(pkt, &tmp8, 1);
79 if (tmp8 != CFSERL_STX) {
80 while (cfpkt_more(pkt)
81 && tmp8 != CFSERL_STX) {
82 cfpkt_extr_head(pkt, &tmp8, 1);
83 }
84 if (!cfpkt_more(pkt)) {
85 cfpkt_destroy(pkt);
86 layr->incomplete_frm = NULL;
87 spin_unlock(&layr->sync);
88 return -EPROTO;
89 }
90 }
91 }
92
93 pkt_len = cfpkt_getlen(pkt);
94
95 /*
96 * pkt_len is the accumulated length of the packet data
97 * we have received so far.
98 * Exit if frame doesn't hold length.
99 */
100
101 if (pkt_len < 2) {
102 if (layr->usestx)
103 cfpkt_add_head(pkt, &stx, 1);
104 layr->incomplete_frm = pkt;
105 spin_unlock(&layr->sync);
106 return 0;
107 }
108
109 /*
110 * Find length of frame.
111 * expectlen is the length we need for a full frame.
112 */
113 cfpkt_peek_head(pkt, &tmp, 2);
114 expectlen = le16_to_cpu(tmp) + 2;
115 /*
116 * Frame error handling
117 */
118 if (expectlen < CAIF_MINIMUM_PACKET_SIZE
119 || expectlen > CAIF_MAX_FRAMESIZE) {
120 if (!layr->usestx) {
121 if (pkt != NULL)
122 cfpkt_destroy(pkt);
123 layr->incomplete_frm = NULL;
124 expectlen = 0;
125 spin_unlock(&layr->sync);
126 return -EPROTO;
127 }
128 continue;
129 }
130
131 if (pkt_len < expectlen) {
132 /* Too little received data */
133 if (layr->usestx)
134 cfpkt_add_head(pkt, &stx, 1);
135 layr->incomplete_frm = pkt;
136 spin_unlock(&layr->sync);
137 return 0;
138 }
139
140 /*
141 * Enough data for at least one frame.
142 * Split the frame, if too long
143 */
144 if (pkt_len > expectlen)
145 tail_pkt = cfpkt_split(pkt, expectlen);
146 else
147 tail_pkt = NULL;
148
149 /* Send the first part of packet upwards.*/
150 spin_unlock(&layr->sync);
151 ret = layr->layer.up->receive(layr->layer.up, pkt);
152 spin_lock(&layr->sync);
153 if (ret == -EILSEQ) {
154 if (layr->usestx) {
155 if (tail_pkt != NULL)
156 pkt = cfpkt_append(pkt, tail_pkt, 0);
157
158 /* Start search for next STX if frame failed */
159 continue;
160 } else {
161 cfpkt_destroy(pkt);
162 pkt = NULL;
163 }
164 }
165
166 pkt = tail_pkt;
167
168 } while (pkt != NULL);
169
170 spin_unlock(&layr->sync);
171 return 0;
172}
173
174static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
175{
176 struct cfserl *layr = container_obj(layer);
177 int ret;
178 u8 tmp8 = CFSERL_STX;
179 if (layr->usestx)
180 cfpkt_add_head(newpkt, &tmp8, 1);
181 ret = layer->dn->transmit(layer->dn, newpkt);
182 if (ret < 0)
183 cfpkt_extr_head(newpkt, &tmp8, 1);
184
185 return ret;
186}
187
188static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
189 int phyid)
190{
191 layr->up->ctrlcmd(layr->up, ctrl, phyid);
192}
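
cfserl reassembles frames from an unframed byte stream: hunt for the STX byte (0x02), read the 16-bit little-endian length that follows, keep buffering until the whole frame is present, split off any surplus, and resync on the next STX after an error. Below is a single-buffer user-space deframer following the same state machine (illustrative names, no locking or overflow checks); like cfserl it hands the length bytes upward, since the framing layer consumes them.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STX 0x02

struct deframer {
	uint8_t buf[512];
	size_t len;
};

/* Feed bytes; calls deliver() once per complete frame (STX stripped,
 * 2-byte length still in front, as in cfserl_receive). */
static void feed(struct deframer *d, const uint8_t *p, size_t n,
		 void (*deliver)(const uint8_t *, size_t))
{
	memcpy(d->buf + d->len, p, n);
	d->len += n;

	for (;;) {
		uint16_t flen;
		size_t i = 0;

		while (i < d->len && d->buf[i] != STX)
			i++;		/* resync: skip to next STX */
		memmove(d->buf, d->buf + i, d->len - i);
		d->len -= i;
		if (d->len < 3)
			return;		/* need STX + 2 length bytes */
		flen = (uint16_t)(d->buf[1] | (d->buf[2] << 8)) + 2;
		if (d->len - 1 < flen)
			return;		/* incomplete, keep buffering */
		deliver(d->buf + 1, flen);
		memmove(d->buf, d->buf + 1 + flen, d->len - 1 - flen);
		d->len -= 1 + flen;
	}
}

static void deliver(const uint8_t *f, size_t n)
{
	printf("frame of %zu bytes: %.*s\n", n, (int)(n - 2), f + 2);
}

int main(void)
{
	struct deframer d = { .len = 0 };
	/* leading garbage, then one frame split across two feeds */
	const uint8_t a[] = { 0xff, STX, 0x02, 0x00, 'h' };
	const uint8_t b[] = { 'i' };

	feed(&d, a, sizeof(a), deliver);	/* buffers, no output */
	feed(&d, b, sizeof(b), deliver);	/* "frame of 4 bytes: hi" */
	return 0;
}
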
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
new file mode 100644
index 000000000000..d470c51c6431
--- /dev/null
+++ b/net/caif/cfsrvl.c
@@ -0,0 +1,185 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/errno.h>
10#include <linux/slab.h>
11#include <net/caif/caif_layer.h>
12#include <net/caif/cfsrvl.h>
13#include <net/caif/cfpkt.h>
14
15#define SRVL_CTRL_PKT_SIZE 1
16#define SRVL_FLOW_OFF 0x81
17#define SRVL_FLOW_ON 0x80
18#define SRVL_SET_PIN 0x82
19#define SRVL_CTRL_PKT_SIZE 1
20
21#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
22
23static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
24 int phyid)
25{
26 struct cfsrvl *service = container_obj(layr);
27 caif_assert(layr->up != NULL);
28 caif_assert(layr->up->ctrlcmd != NULL);
29 switch (ctrl) {
30 case CAIF_CTRLCMD_INIT_RSP:
31 service->open = true;
32 layr->up->ctrlcmd(layr->up, ctrl, phyid);
33 break;
34 case CAIF_CTRLCMD_DEINIT_RSP:
35 case CAIF_CTRLCMD_INIT_FAIL_RSP:
36 service->open = false;
37 layr->up->ctrlcmd(layr->up, ctrl, phyid);
38 break;
39 case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
40 if (phyid != service->dev_info.id)
41 break;
42 if (service->modem_flow_on)
43 layr->up->ctrlcmd(layr->up,
44 CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
45 service->phy_flow_on = false;
46 break;
47 case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND:
48 if (phyid != service->dev_info.id)
49 return;
50 if (service->modem_flow_on) {
51 layr->up->ctrlcmd(layr->up,
52 CAIF_CTRLCMD_FLOW_ON_IND,
53 phyid);
54 }
55 service->phy_flow_on = true;
56 break;
57 case CAIF_CTRLCMD_FLOW_OFF_IND:
58 if (service->phy_flow_on) {
59 layr->up->ctrlcmd(layr->up,
60 CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
61 }
62 service->modem_flow_on = false;
63 break;
64 case CAIF_CTRLCMD_FLOW_ON_IND:
65 if (service->phy_flow_on) {
66 layr->up->ctrlcmd(layr->up,
67 CAIF_CTRLCMD_FLOW_ON_IND, phyid);
68 }
69 service->modem_flow_on = true;
70 break;
71 case _CAIF_CTRLCMD_PHYIF_DOWN_IND:
72 /* In case interface is down, let's fake a remove shutdown */
73 layr->up->ctrlcmd(layr->up,
74 CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid);
75 break;
76 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
77 layr->up->ctrlcmd(layr->up, ctrl, phyid);
78 break;
79 default:
80 pr_warning("CAIF: %s(): "
81 "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
82 /* We have both modem and phy flow on, send flow on */
83 layr->up->ctrlcmd(layr->up, ctrl, phyid);
84 service->phy_flow_on = true;
85 break;
86 }
87}
88
89static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
90{
91 struct cfsrvl *service = container_obj(layr);
92 caif_assert(layr != NULL);
93 caif_assert(layr->dn != NULL);
94 caif_assert(layr->dn->transmit != NULL);
95 switch (ctrl) {
96 case CAIF_MODEMCMD_FLOW_ON_REQ:
97 {
98 struct cfpkt *pkt;
99 struct caif_payload_info *info;
100 u8 flow_on = SRVL_FLOW_ON;
101 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
102 if (!pkt) {
103 pr_warning("CAIF: %s(): Out of memory\n",
104 __func__);
105 return -ENOMEM;
106 }
107
108 if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
109 pr_err("CAIF: %s(): Packet is erroneous!\n",
110 __func__);
111 cfpkt_destroy(pkt);
112 return -EPROTO;
113 }
114 info = cfpkt_info(pkt);
115 info->channel_id = service->layer.id;
116 info->hdr_len = 1;
117 info->dev_info = &service->dev_info;
118 return layr->dn->transmit(layr->dn, pkt);
119 }
120 case CAIF_MODEMCMD_FLOW_OFF_REQ:
121 {
122 struct cfpkt *pkt;
123 struct caif_payload_info *info;
124 u8 flow_off = SRVL_FLOW_OFF;
125 pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
if (!pkt) {
pr_warning("CAIF: %s(): Out of memory\n",
__func__);
return -ENOMEM;
}
126 if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
127 pr_err("CAIF: %s(): Packet is erroneous!\n",
128 __func__);
129 cfpkt_destroy(pkt);
130 return -EPROTO;
131 }
132 info = cfpkt_info(pkt);
133 info->channel_id = service->layer.id;
134 info->hdr_len = 1;
135 info->dev_info = &service->dev_info;
136 return layr->dn->transmit(layr->dn, pkt);
137 }
138 default:
139 break;
140 }
141 return -EINVAL;
142}
143
144void cfservl_destroy(struct cflayer *layer)
145{
146 kfree(layer);
147}
148
149void cfsrvl_init(struct cfsrvl *service,
150 u8 channel_id,
151 struct dev_info *dev_info)
152{
153 caif_assert(offsetof(struct cfsrvl, layer) == 0);
154 service->open = false;
155 service->modem_flow_on = true;
156 service->phy_flow_on = true;
157 service->layer.id = channel_id;
158 service->layer.ctrlcmd = cfservl_ctrlcmd;
159 service->layer.modemcmd = cfservl_modemcmd;
160 service->dev_info = *dev_info;
161}
162
163bool cfsrvl_ready(struct cfsrvl *service, int *err)
164{
165 if (service->open && service->modem_flow_on && service->phy_flow_on)
166 return true;
167 if (!service->open) {
168 *err = -ENOTCONN;
169 return false;
170 }
171 caif_assert(!(service->modem_flow_on && service->phy_flow_on));
172 *err = -EAGAIN;
173 return false;
174}
175u8 cfsrvl_getphyid(struct cflayer *layer)
176{
177 struct cfsrvl *servl = container_obj(layer);
178 return servl->dev_info.id;
179}
180
181bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
182{
183 struct cfsrvl *servl = container_obj(layer);
184 return servl->dev_info.id == phyid;
185}
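
One gap worth flagging in cfservl_modemcmd() above: the CAIF_MODEMCMD_FLOW_OFF_REQ branch passes the result of cfpkt_create() straight to cfpkt_add_head() without the out-of-memory check the FLOW_ON_REQ branch performs. A minimal sketch of the guarded version, mirroring the FLOW_ON_REQ error handling (sketch only, not part of the patch):

	case CAIF_MODEMCMD_FLOW_OFF_REQ:
	{
		struct cfpkt *pkt;
		struct caif_payload_info *info;
		u8 flow_off = SRVL_FLOW_OFF;

		pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
		if (!pkt) {
			/* Same handling as the FLOW_ON_REQ branch above. */
			pr_warning("CAIF: %s(): Out of memory\n", __func__);
			return -ENOMEM;
		}
		if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
			pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
			cfpkt_destroy(pkt);
			return -EPROTO;
		}
		info = cfpkt_info(pkt);
		info->channel_id = service->layer.id;
		info->hdr_len = 1;
		info->dev_info = &service->dev_info;
		return layr->dn->transmit(layr->dn, pkt);
	}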
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
new file mode 100644
index 000000000000..5fd2c9ea8b42
--- /dev/null
+++ b/net/caif/cfutill.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/errno.h>
11#include <net/caif/caif_layer.h>
12#include <net/caif/cfsrvl.h>
13#include <net/caif/cfpkt.h>
14
15#define container_obj(layr) ((struct cfsrvl *) layr)
16#define UTIL_PAYLOAD 0x00
17#define UTIL_CMD_BIT 0x80
18#define UTIL_REMOTE_SHUTDOWN 0x82
19#define UTIL_FLOW_OFF 0x81
20#define UTIL_FLOW_ON 0x80
21#define UTIL_CTRL_PKT_SIZE 1
22static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
23static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
24
25struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
26{
27 struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
28 if (!util) {
29 pr_warning("CAIF: %s(): Out of memory\n", __func__);
30 return NULL;
31 }
32 caif_assert(offsetof(struct cfsrvl, layer) == 0);
33 memset(util, 0, sizeof(struct cfsrvl));
34 cfsrvl_init(util, channel_id, dev_info);
35 util->layer.receive = cfutill_receive;
36 util->layer.transmit = cfutill_transmit;
37 snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
38 return &util->layer;
39}
40
41static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
42{
43 u8 cmd = -1;
44 struct cfsrvl *service = container_obj(layr);
45 caif_assert(layr != NULL);
46 caif_assert(layr->up != NULL);
47 caif_assert(layr->up->receive != NULL);
48 caif_assert(layr->up->ctrlcmd != NULL);
49 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
50 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
51 cfpkt_destroy(pkt);
52 return -EPROTO;
53 }
54
55 switch (cmd) {
56 case UTIL_PAYLOAD:
57 return layr->up->receive(layr->up, pkt);
58 case UTIL_FLOW_OFF:
59 layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
60 cfpkt_destroy(pkt);
61 return 0;
62 case UTIL_FLOW_ON:
63 layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
64 cfpkt_destroy(pkt);
65 return 0;
66 case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */
67 pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n",
68 __func__);
69 layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0);
70 service->open = false;
71 cfpkt_destroy(pkt);
72 return 0;
73 default:
74 cfpkt_destroy(pkt);
75 pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n",
76 __func__, cmd, cmd);
77 return -EPROTO;
78 }
79}
80
81static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
82{
83 u8 zero = 0;
84 struct caif_payload_info *info;
85 int ret;
86 struct cfsrvl *service = container_obj(layr);
87 caif_assert(layr != NULL);
88 caif_assert(layr->dn != NULL);
89 caif_assert(layr->dn->transmit != NULL);
90 if (!cfsrvl_ready(service, &ret))
91 return ret;
92
93 if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
94 pr_err("CAIF: %s(): packet too large size=%d\n",
95 __func__, cfpkt_getlen(pkt));
96 return -EOVERFLOW;
97 }
98
99 cfpkt_add_head(pkt, &zero, 1);
100 /* Add info for MUX-layer to route the packet out. */
101 info = cfpkt_info(pkt);
102 info->channel_id = service->layer.id;
103 /*
104 * To optimize alignment, we add up the size of CAIF header before
105 * payload.
106 */
107 info->hdr_len = 1;
108 info->dev_info = &service->dev_info;
109 ret = layr->dn->transmit(layr->dn, pkt);
110 if (ret < 0) {
 111			u8 tmp;
 112			cfpkt_extr_head(pkt, &tmp, 1); /* roll back the 1-byte header added above */
113 }
114 return ret;
115}
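
For readers decoding traces of the utility link: the leading byte of every frame selects payload versus control, per the #defines above. A hedged illustration of the demultiplexing rule (util_hdr_describe() is hypothetical, and reading UTIL_CMD_BIT as "bit 7 set means control" is an interpretation; cfutill_receive() itself compares exact values):

static const char *util_hdr_describe(u8 cmd)
{
	if (!(cmd & UTIL_CMD_BIT))	/* assumption: bit 7 clear => payload */
		return "payload";
	switch (cmd) {
	case UTIL_FLOW_ON:		/* 0x80 */
		return "flow-on";
	case UTIL_FLOW_OFF:		/* 0x81 */
		return "flow-off";
	case UTIL_REMOTE_SHUTDOWN:	/* 0x82 */
		return "remote-shutdown";
	default:
		return "unknown";
	}
}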
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
new file mode 100644
index 000000000000..0fd827f49491
--- /dev/null
+++ b/net/caif/cfveil.c
@@ -0,0 +1,107 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/stddef.h>
8#include <linux/slab.h>
9#include <net/caif/caif_layer.h>
10#include <net/caif/cfsrvl.h>
11#include <net/caif/cfpkt.h>
12
13#define VEI_PAYLOAD 0x00
14#define VEI_CMD_BIT 0x80
15#define VEI_FLOW_OFF 0x81
16#define VEI_FLOW_ON 0x80
17#define VEI_SET_PIN 0x82
18#define VEI_CTRL_PKT_SIZE 1
19#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
20
21static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
22static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
23
24struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
25{
26 struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
27 if (!vei) {
28 pr_warning("CAIF: %s(): Out of memory\n", __func__);
29 return NULL;
30 }
31 caif_assert(offsetof(struct cfsrvl, layer) == 0);
32 memset(vei, 0, sizeof(struct cfsrvl));
33 cfsrvl_init(vei, channel_id, dev_info);
34 vei->layer.receive = cfvei_receive;
35 vei->layer.transmit = cfvei_transmit;
36 snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
37 return &vei->layer;
38}
39
40static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
41{
42 u8 cmd;
43 int ret;
44 caif_assert(layr->up != NULL);
45 caif_assert(layr->receive != NULL);
46 caif_assert(layr->ctrlcmd != NULL);
47
48
49 if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
50 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
51 cfpkt_destroy(pkt);
52 return -EPROTO;
53 }
54 switch (cmd) {
55 case VEI_PAYLOAD:
56 ret = layr->up->receive(layr->up, pkt);
57 return ret;
58 case VEI_FLOW_OFF:
59 layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
60 cfpkt_destroy(pkt);
61 return 0;
62 case VEI_FLOW_ON:
63 layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
64 cfpkt_destroy(pkt);
65 return 0;
66 case VEI_SET_PIN: /* SET RS232 PIN */
67 cfpkt_destroy(pkt);
68 return 0;
 69	default:
70 pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n",
71 __func__, cmd, cmd);
72 cfpkt_destroy(pkt);
73 return -EPROTO;
74 }
75}
76
77static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
78{
79 u8 tmp = 0;
80 struct caif_payload_info *info;
81 int ret;
82 struct cfsrvl *service = container_obj(layr);
83 if (!cfsrvl_ready(service, &ret))
84 return ret;
85 caif_assert(layr->dn != NULL);
86 caif_assert(layr->dn->transmit != NULL);
 87	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
88 pr_warning("CAIF: %s(): Packet too large - size=%d\n",
89 __func__, cfpkt_getlen(pkt));
90 return -EOVERFLOW;
91 }
92
93 if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
94 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
95 return -EPROTO;
96 }
97
98 /* Add info-> for MUX-layer to route the packet out. */
99 info = cfpkt_info(pkt);
100 info->channel_id = service->layer.id;
101 info->hdr_len = 1;
102 info->dev_info = &service->dev_info;
103 ret = layr->dn->transmit(layr->dn, pkt);
104 if (ret < 0)
105 cfpkt_extr_head(pkt, &tmp, 1);
106 return ret;
107}
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
new file mode 100644
index 000000000000..89ad4ea239f1
--- /dev/null
+++ b/net/caif/cfvidl.c
@@ -0,0 +1,65 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * License terms: GNU General Public License (GPL) version 2
5 */
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/errno.h>
11#include <net/caif/caif_layer.h>
12#include <net/caif/cfsrvl.h>
13#include <net/caif/cfpkt.h>
14
15#define container_obj(layr) ((struct cfsrvl *) layr)
16
17static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt);
18static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt);
19
20struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
21{
22 struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
23 if (!vid) {
24 pr_warning("CAIF: %s(): Out of memory\n", __func__);
25 return NULL;
26 }
27 caif_assert(offsetof(struct cfsrvl, layer) == 0);
28
29 memset(vid, 0, sizeof(struct cfsrvl));
30 cfsrvl_init(vid, channel_id, dev_info);
31 vid->layer.receive = cfvidl_receive;
32 vid->layer.transmit = cfvidl_transmit;
33 snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
34 return &vid->layer;
35}
36
37static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt)
38{
39 u32 videoheader;
40 if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
41 pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
42 cfpkt_destroy(pkt);
43 return -EPROTO;
44 }
45 return layr->up->receive(layr->up, pkt);
46}
47
48static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
49{
50 struct cfsrvl *service = container_obj(layr);
51 struct caif_payload_info *info;
52 u32 videoheader = 0;
53 int ret;
54 if (!cfsrvl_ready(service, &ret))
55 return ret;
56 cfpkt_add_head(pkt, &videoheader, 4);
57 /* Add info for MUX-layer to route the packet out */
58 info = cfpkt_info(pkt);
59 info->channel_id = service->layer.id;
60 info->dev_info = &service->dev_info;
61 ret = layr->dn->transmit(layr->dn, pkt);
62 if (ret < 0)
63 cfpkt_extr_head(pkt, &videoheader, 4);
64 return ret;
65}
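
cfutill_transmit(), cfvei_transmit() and cfvidl_transmit() all share one shape: push the service header, hand the packet down, and pop the header again if the lower layer refuses it, so the caller gets the packet back unmodified for a retry. A hypothetical condensed helper (srvl_send_with_hdr() is not in the patch) makes the pattern explicit:

static int srvl_send_with_hdr(struct cfsrvl *service, struct cfpkt *pkt,
			      void *hdr, u16 hdr_len)
{
	struct caif_payload_info *info;
	int ret;

	if (cfpkt_add_head(pkt, hdr, hdr_len) < 0)
		return -EPROTO;
	/* Routing info for the MUX layer, as in the three functions above. */
	info = cfpkt_info(pkt);
	info->channel_id = service->layer.id;
	info->hdr_len = hdr_len;
	info->dev_info = &service->dev_info;
	ret = service->layer.dn->transmit(service->layer.dn, pkt);
	if (ret < 0)
		cfpkt_extr_head(pkt, hdr, hdr_len);	/* roll back on failure */
	return ret;
}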
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
new file mode 100644
index 000000000000..f622ff1d39ba
--- /dev/null
+++ b/net/caif/chnl_net.c
@@ -0,0 +1,451 @@
1/*
2 * Copyright (C) ST-Ericsson AB 2010
3 * Authors: Sjur Brendeland/sjur.brandeland@stericsson.com
4 * Daniel Martensson / Daniel.Martensson@stericsson.com
5 * License terms: GNU General Public License (GPL) version 2
6 */
7
8#include <linux/version.h>
9#include <linux/fs.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/netdevice.h>
13#include <linux/if_ether.h>
14#include <linux/moduleparam.h>
15#include <linux/ip.h>
16#include <linux/sched.h>
17#include <linux/sockios.h>
18#include <linux/caif/if_caif.h>
19#include <net/rtnetlink.h>
20#include <net/caif/caif_layer.h>
21#include <net/caif/cfcnfg.h>
22#include <net/caif/cfpkt.h>
23#include <net/caif/caif_dev.h>
24
25#define CAIF_CONNECT_TIMEOUT 30
26#define SIZE_MTU 1500
27#define SIZE_MTU_MAX 4080
28#define SIZE_MTU_MIN 68
29#define CAIF_NET_DEFAULT_QUEUE_LEN 500
30
31#undef pr_debug
32#define pr_debug pr_warning
33
 34/* This list is protected by the rtnl lock. */
35static LIST_HEAD(chnl_net_list);
36
37MODULE_LICENSE("GPL");
38MODULE_ALIAS_RTNL_LINK("caif");
39
40struct chnl_net {
41 struct cflayer chnl;
42 struct net_device_stats stats;
43 struct caif_connect_request conn_req;
44 struct list_head list_field;
45 struct net_device *netdev;
46 char name[256];
47 wait_queue_head_t netmgmt_wq;
48 /* Flow status to remember and control the transmission. */
49 bool flowenabled;
50 bool pending_close;
51};
52
53static void robust_list_del(struct list_head *delete_node)
54{
55 struct list_head *list_node;
56 struct list_head *n;
57 ASSERT_RTNL();
58 list_for_each_safe(list_node, n, &chnl_net_list) {
59 if (list_node == delete_node) {
60 list_del(list_node);
61 break;
62 }
63 }
64}
65
66static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
67{
68 struct sk_buff *skb;
69 struct chnl_net *priv = NULL;
70 int pktlen;
71 int err = 0;
72
73 priv = container_of(layr, struct chnl_net, chnl);
74
75 if (!priv)
76 return -EINVAL;
77
78 /* Get length of CAIF packet. */
79 pktlen = cfpkt_getlen(pkt);
80
81 skb = (struct sk_buff *) cfpkt_tonative(pkt);
82 /* Pass some minimum information and
83 * send the packet to the net stack.
84 */
85 skb->dev = priv->netdev;
86 skb->protocol = htons(ETH_P_IP);
87
88 /* If we change the header in loop mode, the checksum is corrupted. */
89 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
90 skb->ip_summed = CHECKSUM_UNNECESSARY;
91 else
92 skb->ip_summed = CHECKSUM_NONE;
93
94 /* FIXME: Drivers should call this in tasklet context. */
95 if (in_interrupt())
96 netif_rx(skb);
97 else
98 netif_rx_ni(skb);
99
100 /* Update statistics. */
101 priv->netdev->stats.rx_packets++;
102 priv->netdev->stats.rx_bytes += pktlen;
103
104 return err;
105}
106
107static int delete_device(struct chnl_net *dev)
108{
109 ASSERT_RTNL();
110 if (dev->netdev)
111 unregister_netdevice(dev->netdev);
112 return 0;
113}
114
115static void close_work(struct work_struct *work)
116{
117 struct chnl_net *dev = NULL;
118 struct list_head *list_node;
119 struct list_head *_tmp;
120 rtnl_lock();
121 list_for_each_safe(list_node, _tmp, &chnl_net_list) {
122 dev = list_entry(list_node, struct chnl_net, list_field);
123 if (!dev->pending_close)
124 continue;
125 list_del(list_node);
126 delete_device(dev);
127 }
128 rtnl_unlock();
129}
130static DECLARE_WORK(close_worker, close_work);
131
132static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
133 int phyid)
134{
135 struct chnl_net *priv;
136 pr_debug("CAIF: %s(): NET flowctrl func called flow: %s.\n",
137 __func__,
138 flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
139 flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
140 flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
141 flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" :
142 flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" :
143 flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?
144 "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND");
145
146 priv = container_of(layr, struct chnl_net, chnl);
147
148 switch (flow) {
149 case CAIF_CTRLCMD_FLOW_OFF_IND:
150 case CAIF_CTRLCMD_DEINIT_RSP:
151 case CAIF_CTRLCMD_INIT_FAIL_RSP:
152 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
153 priv->flowenabled = false;
154 netif_tx_disable(priv->netdev);
155 pr_warning("CAIF: %s(): done\n", __func__);
 156		priv->pending_close = true;
157 schedule_work(&close_worker);
158 break;
159 case CAIF_CTRLCMD_FLOW_ON_IND:
160 case CAIF_CTRLCMD_INIT_RSP:
161 priv->flowenabled = true;
162 netif_wake_queue(priv->netdev);
163 wake_up_interruptible(&priv->netmgmt_wq);
164 break;
165 default:
166 break;
167 }
168}
169
170static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
171{
172 struct chnl_net *priv;
173 struct cfpkt *pkt = NULL;
174 int len;
175 int result = -1;
176 /* Get our private data. */
177 priv = netdev_priv(dev);
178
179 if (skb->len > priv->netdev->mtu) {
180 pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__);
181 return -ENOSPC;
182 }
183
184 if (!priv->flowenabled) {
185 pr_debug("CAIF: %s(): dropping packets flow off\n", __func__);
186 return NETDEV_TX_BUSY;
187 }
188
189 if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
190 swap(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
191
192 /* Store original SKB length. */
193 len = skb->len;
194
195 pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);
196
197 pr_debug("CAIF: %s(): transmit inst %s %d,%p\n",
198 __func__, dev->name, priv->chnl.dn->id, &priv->chnl.dn);
199
200 /* Send the packet down the stack. */
201 result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
202 if (result) {
203 if (result == -EAGAIN)
204 result = NETDEV_TX_BUSY;
205 return result;
206 }
207
208 /* Update statistics. */
209 dev->stats.tx_packets++;
210 dev->stats.tx_bytes += len;
211
212 return NETDEV_TX_OK;
213}
214
215static int chnl_net_open(struct net_device *dev)
216{
217 struct chnl_net *priv = NULL;
218 int result = -1;
219 ASSERT_RTNL();
220
 221	priv = netdev_priv(dev);
 222	if (!priv) {
 223		pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__);
 224		return -ENODEV;
 225	}
 226	pr_debug("CAIF: %s(): dev name: %s\n", __func__, priv->name);
 227
228 result = caif_connect_client(&priv->conn_req, &priv->chnl);
229 if (result != 0) {
230 pr_debug("CAIF: %s(): err: "
231 "Unable to register and open device, Err:%d\n",
232 __func__,
233 result);
234 return -ENODEV;
235 }
236 result = wait_event_interruptible(priv->netmgmt_wq, priv->flowenabled);
237
238 if (result == -ERESTARTSYS) {
239 pr_debug("CAIF: %s(): wait_event_interruptible"
240 " woken by a signal\n", __func__);
241 return -ERESTARTSYS;
242 } else
 243		pr_debug("CAIF: %s(): Flow on received\n", __func__);
244
245 return 0;
246}
247
248static int chnl_net_stop(struct net_device *dev)
249{
250 struct chnl_net *priv;
251 int result = -1;
252 ASSERT_RTNL();
253 priv = netdev_priv(dev);
254
255 result = caif_disconnect_client(&priv->chnl);
256 if (result != 0) {
257 pr_debug("CAIF: %s(): chnl_net_stop: err: "
258 "Unable to STOP device, Err:%d\n",
259 __func__, result);
260 return -EBUSY;
261 }
262 result = wait_event_interruptible(priv->netmgmt_wq,
263 !priv->flowenabled);
264
265 if (result == -ERESTARTSYS) {
266 pr_debug("CAIF: %s(): wait_event_interruptible woken by"
267 " signal, signal_pending(current) = %d\n",
268 __func__,
269 signal_pending(current));
270 } else {
271 pr_debug("CAIF: %s(): disconnect received\n", __func__);
272
273 }
274
275 return 0;
276}
277
278static int chnl_net_init(struct net_device *dev)
279{
280 struct chnl_net *priv;
281 ASSERT_RTNL();
282 priv = netdev_priv(dev);
283 strncpy(priv->name, dev->name, sizeof(priv->name));
284 return 0;
285}
286
287static void chnl_net_uninit(struct net_device *dev)
288{
289 struct chnl_net *priv;
290 ASSERT_RTNL();
291 priv = netdev_priv(dev);
292 robust_list_del(&priv->list_field);
293}
294
295static const struct net_device_ops netdev_ops = {
296 .ndo_open = chnl_net_open,
297 .ndo_stop = chnl_net_stop,
298 .ndo_init = chnl_net_init,
299 .ndo_uninit = chnl_net_uninit,
300 .ndo_start_xmit = chnl_net_start_xmit,
301};
302
303static void ipcaif_net_setup(struct net_device *dev)
304{
305 struct chnl_net *priv;
306 dev->netdev_ops = &netdev_ops;
307 dev->destructor = free_netdev;
308 dev->flags |= IFF_NOARP;
309 dev->flags |= IFF_POINTOPOINT;
310 dev->needed_headroom = CAIF_NEEDED_HEADROOM;
311 dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
312 dev->mtu = SIZE_MTU;
313 dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;
314
315 priv = netdev_priv(dev);
316 priv->chnl.receive = chnl_recv_cb;
317 priv->chnl.ctrlcmd = chnl_flowctrl_cb;
318 priv->netdev = dev;
319 priv->conn_req.protocol = CAIFPROTO_DATAGRAM;
320 priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
321 priv->conn_req.priority = CAIF_PRIO_LOW;
322 /* Insert illegal value */
323 priv->conn_req.sockaddr.u.dgm.connection_id = -1;
324 priv->flowenabled = false;
325
326 ASSERT_RTNL();
327 init_waitqueue_head(&priv->netmgmt_wq);
328 list_add(&priv->list_field, &chnl_net_list);
329}
330
331
332static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
333{
334 struct chnl_net *priv;
335 u8 loop;
336 priv = netdev_priv(dev);
337 NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
338 priv->conn_req.sockaddr.u.dgm.connection_id);
339 NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
340 priv->conn_req.sockaddr.u.dgm.connection_id);
341 loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
342 NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);
343
344
345 return 0;
346nla_put_failure:
347 return -EMSGSIZE;
348
349}
350
351static void caif_netlink_parms(struct nlattr *data[],
352 struct caif_connect_request *conn_req)
353{
354 if (!data) {
355 pr_warning("CAIF: %s: no params data found\n", __func__);
356 return;
357 }
358 if (data[IFLA_CAIF_IPV4_CONNID])
359 conn_req->sockaddr.u.dgm.connection_id =
360 nla_get_u32(data[IFLA_CAIF_IPV4_CONNID]);
361 if (data[IFLA_CAIF_IPV6_CONNID])
362 conn_req->sockaddr.u.dgm.connection_id =
363 nla_get_u32(data[IFLA_CAIF_IPV6_CONNID]);
364 if (data[IFLA_CAIF_LOOPBACK]) {
365 if (nla_get_u8(data[IFLA_CAIF_LOOPBACK]))
366 conn_req->protocol = CAIFPROTO_DATAGRAM_LOOP;
367 else
368 conn_req->protocol = CAIFPROTO_DATAGRAM;
369 }
370}
371
372static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
373 struct nlattr *tb[], struct nlattr *data[])
374{
375 int ret;
376 struct chnl_net *caifdev;
377 ASSERT_RTNL();
378 caifdev = netdev_priv(dev);
379 caif_netlink_parms(data, &caifdev->conn_req);
380 ret = register_netdevice(dev);
381 if (ret)
 382		pr_warning("CAIF: %s(): device rtnl registration failed\n",
383 __func__);
384 return ret;
385}
386
387static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[],
388 struct nlattr *data[])
389{
390 struct chnl_net *caifdev;
391 ASSERT_RTNL();
392 caifdev = netdev_priv(dev);
393 caif_netlink_parms(data, &caifdev->conn_req);
394 netdev_state_change(dev);
395 return 0;
396}
397
398static size_t ipcaif_get_size(const struct net_device *dev)
399{
400 return
401 /* IFLA_CAIF_IPV4_CONNID */
402 nla_total_size(4) +
403 /* IFLA_CAIF_IPV6_CONNID */
404 nla_total_size(4) +
405 /* IFLA_CAIF_LOOPBACK */
406 nla_total_size(2) +
407 0;
408}
409
410static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
411 [IFLA_CAIF_IPV4_CONNID] = { .type = NLA_U32 },
412 [IFLA_CAIF_IPV6_CONNID] = { .type = NLA_U32 },
413 [IFLA_CAIF_LOOPBACK] = { .type = NLA_U8 }
414};
415
416
417static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
418 .kind = "caif",
419 .priv_size = sizeof(struct chnl_net),
420 .setup = ipcaif_net_setup,
421 .maxtype = IFLA_CAIF_MAX,
422 .policy = ipcaif_policy,
423 .newlink = ipcaif_newlink,
424 .changelink = ipcaif_changelink,
425 .get_size = ipcaif_get_size,
426 .fill_info = ipcaif_fill_info,
427
428};
429
430static int __init chnl_init_module(void)
431{
432 return rtnl_link_register(&ipcaif_link_ops);
433}
434
435static void __exit chnl_exit_module(void)
436{
437 struct chnl_net *dev = NULL;
438 struct list_head *list_node;
439 struct list_head *_tmp;
440 rtnl_link_unregister(&ipcaif_link_ops);
441 rtnl_lock();
442 list_for_each_safe(list_node, _tmp, &chnl_net_list) {
443 dev = list_entry(list_node, struct chnl_net, list_field);
444 list_del(list_node);
445 delete_device(dev);
446 }
447 rtnl_unlock();
448}
449
450module_init(chnl_init_module);
451module_exit(chnl_exit_module);
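
A design note on the close path above: chnl_flowctrl_cb() can be invoked from a context where taking the RTNL lock and calling unregister_netdevice() is not allowed, so teardown happens in two steps: the callback merely sets pending_close and schedules close_worker, and the work item later walks chnl_net_list under rtnl_lock() and deletes the marked devices. The generic shape of that pattern, as a sketch (struct deferred_dev is hypothetical):

struct deferred_dev {
	bool pending_close;
	/* device state ... */
};

static void request_teardown(struct deferred_dev *d, struct work_struct *w)
{
	d->pending_close = true;	/* record intent; no RTNL taken here */
	schedule_work(w);		/* worker unregisters under rtnl_lock() */
}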
diff --git a/net/core/Makefile b/net/core/Makefile
index 08791ac3e05a..51c3eec850ef 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -7,7 +7,7 @@ obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
7 7 
8 8 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
9 9 
10 obj-y += dev.o ethtool.o dev_mcast.o dst.o netevent.o \
10 obj-y += dev.o ethtool.o dev_addr_lists.o dst.o netevent.o \
11 11 	neighbour.o rtnetlink.o utils.o link_watch.o filter.o
12 12 
13 13 obj-$(CONFIG_XFRM) += flow.o
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 2dccd4ee591b..5574a5ddf908 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -86,7 +86,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
86 86 	int error;
87 87 	DEFINE_WAIT_FUNC(wait, receiver_wake_function);
88 88 
89 	prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
89 	prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
90 90 
91 91 	/* Socket errors? */
92 92 	error = sock_error(sk);
@@ -115,7 +115,7 @@ static int wait_for_packet(struct sock *sk, int *err, long *timeo_p)
115 115 	error = 0;
116 116 	*timeo_p = schedule_timeout(*timeo_p);
117 117 out:
118 	finish_wait(sk->sk_sleep, &wait);
118 	finish_wait(sk_sleep(sk), &wait);
119 119 	return error;
120 120 interrupted:
121 121 	error = sock_intr_errno(*timeo_p);
@@ -726,7 +726,7 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
726 726 	struct sock *sk = sock->sk;
727 727 	unsigned int mask;
728 728 
729 	sock_poll_wait(file, sk->sk_sleep, wait);
729 	sock_poll_wait(file, sk_sleep(sk), wait);
730 730 	mask = 0;
731 731 
732 732 	/* exceptional events? */
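
The three hunks above are mechanical conversions to the sk_sleep() accessor introduced elsewhere in this series. At this point the helper is presumably a trivial wrapper that just hides the field so its storage can later move without touching call sites again; roughly:

/* Sketch of the accessor these call sites now use (assumed shape). */
static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	return sk->sk_sleep;
}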
diff --git a/net/core/dev.c b/net/core/dev.c
index f769098774b7..e904c476b112 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -130,6 +130,7 @@
130 130 #include <linux/jhash.h>
131 131 #include <linux/random.h>
132 132 #include <trace/events/napi.h>
    133 #include <linux/pci.h>
133 134 
134 135 #include "net-sysfs.h"
135 136 
@@ -207,6 +208,20 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
207 208 	return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
208 209 }
209 210 
211static inline void rps_lock(struct softnet_data *sd)
212{
213#ifdef CONFIG_RPS
214 spin_lock(&sd->input_pkt_queue.lock);
215#endif
216}
217
218static inline void rps_unlock(struct softnet_data *sd)
219{
220#ifdef CONFIG_RPS
221 spin_unlock(&sd->input_pkt_queue.lock);
222#endif
223}
224
210 225 /* Device list insertion */
211 226 static int list_netdevice(struct net_device *dev)
212 227 {
@@ -249,7 +264,7 @@ static RAW_NOTIFIER_HEAD(netdev_chain);
249 264  * queue in the local softnet handler.
250 265  */
251 266 
252 DEFINE_PER_CPU(struct softnet_data, softnet_data);
267 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
253 268 EXPORT_PER_CPU_SYMBOL(softnet_data);
254 269 
255 270 #ifdef CONFIG_LOCKDEP
@@ -773,14 +788,17 @@ EXPORT_SYMBOL(__dev_getfirstbyhwtype);
773 788 
774 789 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
775 790 {
776 	struct net_device *dev;
791 	struct net_device *dev, *ret = NULL;
777 792 
778 	rtnl_lock();
779 	dev = __dev_getfirstbyhwtype(net, type);
780 	if (dev)
781 		dev_hold(dev);
782 	rtnl_unlock();
783 	return dev;
793 	rcu_read_lock();
794 	for_each_netdev_rcu(net, dev)
795 		if (dev->type == type) {
796 			dev_hold(dev);
797 			ret = dev;
798 			break;
799 		}
800 	rcu_read_unlock();
801 	return ret;
784 802 }
785 803 EXPORT_SYMBOL(dev_getfirstbyhwtype);
786 804 
@@ -1085,9 +1103,9 @@ void netdev_state_change(struct net_device *dev)
1085 1103 }
1086 1104 EXPORT_SYMBOL(netdev_state_change);
1087 1105 
1088 void netdev_bonding_change(struct net_device *dev, unsigned long event)
1106 int netdev_bonding_change(struct net_device *dev, unsigned long event)
1089 1107 {
1090 	call_netdevice_notifiers(event, dev);
1108 	return call_netdevice_notifiers(event, dev);
1091 1109 }
1092 1110 EXPORT_SYMBOL(netdev_bonding_change);
1093 1111 
@@ -1417,6 +1435,7 @@ EXPORT_SYMBOL(unregister_netdevice_notifier);
1417 1435 
1418 1436 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1419 1437 {
1438 	ASSERT_RTNL();
1420 1439 	return raw_notifier_call_chain(&netdev_chain, val, dev);
1421 1440 }
1422 1441 
@@ -1784,18 +1803,27 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
1784 1803  * 2. No high memory really exists on this machine.
1785 1804  */
1786 1805 
1787 static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1806 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
1788 1807 {
1789 1808 #ifdef CONFIG_HIGHMEM
1790 1809 	int i;
1810 	if (!(dev->features & NETIF_F_HIGHDMA)) {
1811 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1812 			if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1813 				return 1;
1814 	}
1791 1815 
1792 	if (dev->features & NETIF_F_HIGHDMA)
1793 		return 0;
1794 
1795 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
1796 		if (PageHighMem(skb_shinfo(skb)->frags[i].page))
1797 			return 1;
1816 	if (PCI_DMA_BUS_IS_PHYS) {
1817 		struct device *pdev = dev->dev.parent;
1798 1818 
1819 		if (!pdev)
1820 			return 0;
1821 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1822 			dma_addr_t addr = page_to_phys(skb_shinfo(skb)->frags[i].page);
1823 			if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
1824 				return 1;
1825 		}
1826 	}
1799 1827 #endif
1800 1828 	return 0;
1801 1829 }
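
The rewritten illegal_highdma() above also rejects frag pages that fall outside the parent device's DMA mask when PCI_DMA_BUS_IS_PHYS. For context, a hedged sketch of the driver-side counterpart that makes the highmem scan a no-op (the probe-path surroundings are assumed; the flag itself is standard):

	/* In a NIC driver's probe path: hardware that can address 64 bits
	 * may advertise NETIF_F_HIGHDMA, so the PageHighMem() scan above
	 * is skipped for it.
	 */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
		netdev->features |= NETIF_F_HIGHDMA;
	else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)))
		dev_err(&pdev->dev, "no usable DMA configuration\n");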
@@ -1853,6 +1881,17 @@ static int dev_gso_segment(struct sk_buff *skb)
1853 1881 	return 0;
1854 1882 }
1855 1883 
1884/*
1885 * Try to orphan skb early, right before transmission by the device.
1886 * We cannot orphan skb if tx timestamp is requested, since
1887 * drivers need to call skb_tstamp_tx() to send the timestamp.
1888 */
1889static inline void skb_orphan_try(struct sk_buff *skb)
1890{
1891 if (!skb_tx(skb)->flags)
1892 skb_orphan(skb);
1893}
1894
1856 1895 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1857 1896 			struct netdev_queue *txq)
1858 1897 {
@@ -1877,23 +1916,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
1877 1916 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1878 1917 			skb_dst_drop(skb);
1879 1918 
1919 		skb_orphan_try(skb);
1880 1920 		rc = ops->ndo_start_xmit(skb, dev);
1881 1921 		if (rc == NETDEV_TX_OK)
1882 1922 			txq_trans_update(txq);
1883 		/*
1884 		 * TODO: if skb_orphan() was called by
1885 		 * dev->hard_start_xmit() (for example, the unmodified
1886 		 * igb driver does that; bnx2 doesn't), then
1887 		 * skb_tx_software_timestamp() will be unable to send
1888 		 * back the time stamp.
1889 		 *
1890 		 * How can this be prevented? Always create another
1891 		 * reference to the socket before calling
1892 		 * dev->hard_start_xmit()? Prevent that skb_orphan()
1893 		 * does anything in dev->hard_start_xmit() by clearing
1894 		 * the skb destructor before the call and restoring it
1895 		 * afterwards, then doing the skb_orphan() ourselves?
1896 		 */
1897 1923 		return rc;
1898 1924 	}
1899 1925 
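
skb_orphan_try() above settles the old TODO: the skb is orphaned right before ->ndo_start_xmit() unless a tx-timestamp flag is set, because skb_tstamp_tx() still needs the socket reference in that case. For reference, skb_orphan() itself boils down to running and clearing the destructor so the owning socket's send-buffer accounting is released early; approximately:

/* Sketch of skb_orphan() semantics (see include/linux/skbuff.h). */
static inline void skb_orphan_sketch(struct sk_buff *skb)
{
	if (skb->destructor)
		skb->destructor(skb);	/* e.g. sock_wfree(): uncharge sk_wmem_alloc */
	skb->destructor = NULL;
	skb->sk = NULL;
}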
@@ -1911,6 +1937,7 @@ gso:
1911 1937 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
1912 1938 			skb_dst_drop(nskb);
1913 1939 
1940 		skb_orphan_try(nskb);
1914 1941 		rc = ops->ndo_start_xmit(nskb, dev);
1915 1942 		if (unlikely(rc != NETDEV_TX_OK)) {
1916 1943 			if (rc & ~NETDEV_TX_MASK)
@@ -1932,7 +1959,7 @@ out_kfree_skb:
1932 1959 	return rc;
1933 1960 }
1934 1961 
1935 static u32 skb_tx_hashrnd;
1962 static u32 hashrnd __read_mostly;
1936 1963 
1937 1964 u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1938 1965 {
@@ -1948,9 +1975,9 @@ u16 skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb)
1948 1975 	if (skb->sk && skb->sk->sk_hash)
1949 1976 		hash = skb->sk->sk_hash;
1950 1977 	else
1951 		hash = skb->protocol;
1978 		hash = (__force u16) skb->protocol;
1952 1979 
1953 	hash = jhash_1word(hash, skb_tx_hashrnd);
1980 	hash = jhash_1word(hash, hashrnd);
1954 1981 
1955 1982 	return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
1956 1983 }
@@ -1960,10 +1987,9 @@ static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
1960 1987 {
1961 1988 	if (unlikely(queue_index >= dev->real_num_tx_queues)) {
1962 1989 		if (net_ratelimit()) {
1963 			WARN(1, "%s selects TX queue %d, but "
1990 			pr_warning("%s selects TX queue %d, but "
1964 1991 			     "real number of TX queues is %d\n",
1965 			     dev->name, queue_index,
1966 			     dev->real_num_tx_queues);
1992 				   dev->name, queue_index, dev->real_num_tx_queues);
1967 1993 		}
1968 1994 		return 0;
1969 1995 	}
@@ -1990,7 +2016,7 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
1990 2016 			queue_index = skb_tx_hash(dev, skb);
1991 2017 
1992 2018 		if (sk) {
1993 			struct dst_entry *dst = rcu_dereference_bh(sk->sk_dst_cache);
2019 			struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
1994 2020 
1995 2021 			if (dst && skb_dst(skb) == dst)
1996 2022 				sk_tx_queue_set(sk, queue_index);
@@ -2180,6 +2206,237 @@ int weight_p __read_mostly = 64; /* old backlog weight */
2180 2206
2181 2207 DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2182 2208
2209#ifdef CONFIG_RPS
2210
2211/* One global table that all flow-based protocols share. */
2212struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
2213EXPORT_SYMBOL(rps_sock_flow_table);
2214
2215/*
2216 * get_rps_cpu is called from netif_receive_skb and returns the target
2217 * CPU from the RPS map of the receiving queue for a given skb.
2218 * rcu_read_lock must be held on entry.
2219 */
2220static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
2221 struct rps_dev_flow **rflowp)
2222{
2223 struct ipv6hdr *ip6;
2224 struct iphdr *ip;
2225 struct netdev_rx_queue *rxqueue;
2226 struct rps_map *map;
2227 struct rps_dev_flow_table *flow_table;
2228 struct rps_sock_flow_table *sock_flow_table;
2229 int cpu = -1;
2230 u8 ip_proto;
2231 u16 tcpu;
2232 u32 addr1, addr2, ports, ihl;
2233
2234 if (skb_rx_queue_recorded(skb)) {
2235 u16 index = skb_get_rx_queue(skb);
2236 if (unlikely(index >= dev->num_rx_queues)) {
2237 if (net_ratelimit()) {
2238 pr_warning("%s received packet on queue "
2239 "%u, but number of RX queues is %u\n",
2240 dev->name, index, dev->num_rx_queues);
2241 }
2242 goto done;
2243 }
2244 rxqueue = dev->_rx + index;
2245 } else
2246 rxqueue = dev->_rx;
2247
2248 if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
2249 goto done;
2250
2251 if (skb->rxhash)
2252 goto got_hash; /* Skip hash computation on packet header */
2253
2254 switch (skb->protocol) {
2255 case __constant_htons(ETH_P_IP):
2256 if (!pskb_may_pull(skb, sizeof(*ip)))
2257 goto done;
2258
2259 ip = (struct iphdr *) skb->data;
2260 ip_proto = ip->protocol;
2261 addr1 = (__force u32) ip->saddr;
2262 addr2 = (__force u32) ip->daddr;
2263 ihl = ip->ihl;
2264 break;
2265 case __constant_htons(ETH_P_IPV6):
2266 if (!pskb_may_pull(skb, sizeof(*ip6)))
2267 goto done;
2268
2269 ip6 = (struct ipv6hdr *) skb->data;
2270 ip_proto = ip6->nexthdr;
2271 addr1 = (__force u32) ip6->saddr.s6_addr32[3];
2272 addr2 = (__force u32) ip6->daddr.s6_addr32[3];
2273 ihl = (40 >> 2);
2274 break;
2275 default:
2276 goto done;
2277 }
2278 ports = 0;
2279 switch (ip_proto) {
2280 case IPPROTO_TCP:
2281 case IPPROTO_UDP:
2282 case IPPROTO_DCCP:
2283 case IPPROTO_ESP:
2284 case IPPROTO_AH:
2285 case IPPROTO_SCTP:
2286 case IPPROTO_UDPLITE:
2287 if (pskb_may_pull(skb, (ihl * 4) + 4)) {
2288 __be16 *hports = (__be16 *) (skb->data + (ihl * 4));
2289 u32 sport, dport;
2290
2291 sport = (__force u16) hports[0];
2292 dport = (__force u16) hports[1];
2293 if (dport < sport)
2294 swap(sport, dport);
2295 ports = (sport << 16) + dport;
2296 }
2297 break;
2298
2299 default:
2300 break;
2301 }
2302
2303 /* get a consistent hash (same value on both flow directions) */
2304 if (addr2 < addr1)
2305 swap(addr1, addr2);
2306 skb->rxhash = jhash_3words(addr1, addr2, ports, hashrnd);
2307 if (!skb->rxhash)
2308 skb->rxhash = 1;
2309
2310got_hash:
2311 flow_table = rcu_dereference(rxqueue->rps_flow_table);
2312 sock_flow_table = rcu_dereference(rps_sock_flow_table);
2313 if (flow_table && sock_flow_table) {
2314 u16 next_cpu;
2315 struct rps_dev_flow *rflow;
2316
2317 rflow = &flow_table->flows[skb->rxhash & flow_table->mask];
2318 tcpu = rflow->cpu;
2319
2320 next_cpu = sock_flow_table->ents[skb->rxhash &
2321 sock_flow_table->mask];
2322
2323 /*
2324 * If the desired CPU (where last recvmsg was done) is
2325 * different from current CPU (one in the rx-queue flow
2326 * table entry), switch if one of the following holds:
2327 * - Current CPU is unset (equal to RPS_NO_CPU).
2328 * - Current CPU is offline.
2329 * - The current CPU's queue tail has advanced beyond the
2330 * last packet that was enqueued using this table entry.
2331 * This guarantees that all previous packets for the flow
2332 * have been dequeued, thus preserving in order delivery.
2333 */
2334 if (unlikely(tcpu != next_cpu) &&
2335 (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
2336 ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
2337 rflow->last_qtail)) >= 0)) {
2338 tcpu = rflow->cpu = next_cpu;
2339 if (tcpu != RPS_NO_CPU)
2340 rflow->last_qtail = per_cpu(softnet_data,
2341 tcpu).input_queue_head;
2342 }
2343 if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
2344 *rflowp = rflow;
2345 cpu = tcpu;
2346 goto done;
2347 }
2348 }
2349
2350 map = rcu_dereference(rxqueue->rps_map);
2351 if (map) {
2352 tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
2353
2354 if (cpu_online(tcpu)) {
2355 cpu = tcpu;
2356 goto done;
2357 }
2358 }
2359
2360done:
2361 return cpu;
2362}
2363
2364/* Called from hardirq (IPI) context */
2365static void rps_trigger_softirq(void *data)
2366{
2367 struct softnet_data *sd = data;
2368
2369 __napi_schedule(&sd->backlog);
2370 __get_cpu_var(netdev_rx_stat).received_rps++;
2371}
2372
2373#endif /* CONFIG_RPS */
2374
2375/*
2376 * Check if this softnet_data structure is another cpu one
2377 * If yes, queue it to our IPI list and return 1
2378 * If no, return 0
2379 */
2380static int rps_ipi_queued(struct softnet_data *sd)
2381{
2382#ifdef CONFIG_RPS
2383 struct softnet_data *mysd = &__get_cpu_var(softnet_data);
2384
2385 if (sd != mysd) {
2386 sd->rps_ipi_next = mysd->rps_ipi_list;
2387 mysd->rps_ipi_list = sd;
2388
2389 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
2390 return 1;
2391 }
2392#endif /* CONFIG_RPS */
2393 return 0;
2394}
2395
2396/*
2397 * enqueue_to_backlog is called to queue an skb to a per CPU backlog
2398 * queue (may be a remote CPU queue).
2399 */
2400static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
2401 unsigned int *qtail)
2402{
2403 struct softnet_data *sd;
2404 unsigned long flags;
2405
2406 sd = &per_cpu(softnet_data, cpu);
2407
2408 local_irq_save(flags);
2409 __get_cpu_var(netdev_rx_stat).total++;
2410
2411 rps_lock(sd);
2412 if (sd->input_pkt_queue.qlen <= netdev_max_backlog) {
2413 if (sd->input_pkt_queue.qlen) {
2414enqueue:
2415 __skb_queue_tail(&sd->input_pkt_queue, skb);
2416#ifdef CONFIG_RPS
2417 *qtail = sd->input_queue_head + sd->input_pkt_queue.qlen;
2418#endif
2419 rps_unlock(sd);
2420 local_irq_restore(flags);
2421 return NET_RX_SUCCESS;
2422 }
2423
2424 /* Schedule NAPI for backlog device */
2425 if (napi_schedule_prep(&sd->backlog)) {
2426 if (!rps_ipi_queued(sd))
2427 __napi_schedule(&sd->backlog);
2428 }
2429 goto enqueue;
2430 }
2431
2432 rps_unlock(sd);
2433
2434 __get_cpu_var(netdev_rx_stat).dropped++;
2435 local_irq_restore(flags);
2436
2437 kfree_skb(skb);
2438 return NET_RX_DROP;
2439}
2183 2440
2184 2441 /**
2185 2442  * netif_rx - post buffer to the network code
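
A note on the hash computed in get_rps_cpu() above: sorting the address pair (and the port pair) before hashing makes the result direction-independent, so both directions of one connection land in the same flow-table slot. Condensed to its essentials (rps_flow_hash() is illustrative; jhash_3words() is the helper the patch itself uses):

static u32 rps_flow_hash(u32 addr1, u32 addr2, u16 sport, u16 dport, u32 seed)
{
	u32 ports;

	if (addr2 < addr1)
		swap(addr1, addr2);
	if (dport < sport)
		swap(sport, dport);
	ports = ((u32) sport << 16) + dport;
	/* 0 means "no hash computed yet", hence the fixup to 1. */
	return jhash_3words(addr1, addr2, ports, seed) ? : 1;
}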
@@ -2198,8 +2455,7 @@ DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
2198 2455
2199 2456 int netif_rx(struct sk_buff *skb)
2200 2457 {
2201 	struct softnet_data *queue;
2202 	unsigned long flags;
2458 	int ret;
2203 2459 
2204 2460 	/* if netpoll wants it, pretend we never saw it */
2205 2461 	if (netpoll_rx(skb))
@@ -2208,31 +2464,29 @@ int netif_rx(struct sk_buff *skb)
2208 2464 	if (!skb->tstamp.tv64)
2209 2465 		net_timestamp(skb);
2210 2466 
2211 	/*
2212 	 * The code is rearranged so that the path is the most
2213 	 * short when CPU is congested, but is still operating.
2214 	 */
2215 	local_irq_save(flags);
2216 	queue = &__get_cpu_var(softnet_data);
2217 
2218 	__get_cpu_var(netdev_rx_stat).total++;
2219 	if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
2220 		if (queue->input_pkt_queue.qlen) {
2221 enqueue:
2222 			__skb_queue_tail(&queue->input_pkt_queue, skb);
2223 			local_irq_restore(flags);
2224 			return NET_RX_SUCCESS;
2225 		}
2226 
2227 		napi_schedule(&queue->backlog);
2228 		goto enqueue;
2229 	}
2230 
2231 	__get_cpu_var(netdev_rx_stat).dropped++;
2232 	local_irq_restore(flags);
2233 
2234 	kfree_skb(skb);
2235 	return NET_RX_DROP;
2467 #ifdef CONFIG_RPS
2468 	{
2469 		struct rps_dev_flow voidflow, *rflow = &voidflow;
2470 		int cpu;
2471 
2472 		rcu_read_lock();
2473 
2474 		cpu = get_rps_cpu(skb->dev, skb, &rflow);
2475 		if (cpu < 0)
2476 			cpu = smp_processor_id();
2477 
2478 		ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2479 
2480 		rcu_read_unlock();
2481 	}
2482 #else
2483 	{
2484 		unsigned int qtail;
2485 		ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
2486 		put_cpu();
2487 	}
2488 #endif
2489 	return ret;
2236 2490 }
2237 2491 EXPORT_SYMBOL(netif_rx);
2238 2492 
@@ -2469,22 +2723,56 @@ void netif_nit_deliver(struct sk_buff *skb)
2469 2723 	rcu_read_unlock();
2470 2724 }
2471 2725 
2472 /**
2473  * netif_receive_skb - process receive buffer from network
2474  * @skb: buffer to process
2475  *
2476  * netif_receive_skb() is the main receive data processing function.
2477  * It always succeeds. The buffer may be dropped during processing
2478  * for congestion control or by the protocol layers.
2479  *
2480  * This function may only be called from softirq context and interrupts
2481  * should be enabled.
2482  *
2483  * Return values (usually ignored):
2484  *	NET_RX_SUCCESS: no congestion
2485  *	NET_RX_DROP: packet was dropped
2486  */
2487 int netif_receive_skb(struct sk_buff *skb)
2726 static inline void skb_bond_set_mac_by_master(struct sk_buff *skb,
2727 					      struct net_device *master)
2728 {
2729 	if (skb->pkt_type == PACKET_HOST) {
2730 		u16 *dest = (u16 *) eth_hdr(skb)->h_dest;
2731 
2732 		memcpy(dest, master->dev_addr, ETH_ALEN);
2733 	}
2734 }
2735 
2736 /* On bonding slaves other than the currently active slave, suppress
2737  * duplicates except for 802.3ad ETH_P_SLOW, alb non-mcast/bcast, and
2738  * ARP on active-backup slaves with arp_validate enabled.
2739  */
2740 int __skb_bond_should_drop(struct sk_buff *skb, struct net_device *master)
2741{
2742 struct net_device *dev = skb->dev;
2743
2744 if (master->priv_flags & IFF_MASTER_ARPMON)
2745 dev->last_rx = jiffies;
2746
2747 if ((master->priv_flags & IFF_MASTER_ALB) && master->br_port) {
2748 /* Do address unmangle. The local destination address
2749 * will be always the one master has. Provides the right
2750 * functionality in a bridge.
2751 */
2752 skb_bond_set_mac_by_master(skb, master);
2753 }
2754
2755 if (dev->priv_flags & IFF_SLAVE_INACTIVE) {
2756 if ((dev->priv_flags & IFF_SLAVE_NEEDARP) &&
2757 skb->protocol == __cpu_to_be16(ETH_P_ARP))
2758 return 0;
2759
2760 if (master->priv_flags & IFF_MASTER_ALB) {
2761 if (skb->pkt_type != PACKET_BROADCAST &&
2762 skb->pkt_type != PACKET_MULTICAST)
2763 return 0;
2764 }
2765 if (master->priv_flags & IFF_MASTER_8023AD &&
2766 skb->protocol == __cpu_to_be16(ETH_P_SLOW))
2767 return 0;
2768
2769 return 1;
2770 }
2771 return 0;
2772}
2773EXPORT_SYMBOL(__skb_bond_should_drop);
2774
2775static int __netif_receive_skb(struct sk_buff *skb)
2488 2776 {
2489 2777 	struct packet_type *ptype, *pt_prev;
2490 2778 	struct net_device *orig_dev;
@@ -2595,20 +2883,64 @@ out:
2595 2883 	rcu_read_unlock();
2596 2884 	return ret;
2597 2885 }
2886
2887/**
2888 * netif_receive_skb - process receive buffer from network
2889 * @skb: buffer to process
2890 *
2891 * netif_receive_skb() is the main receive data processing function.
2892 * It always succeeds. The buffer may be dropped during processing
2893 * for congestion control or by the protocol layers.
2894 *
2895 * This function may only be called from softirq context and interrupts
2896 * should be enabled.
2897 *
2898 * Return values (usually ignored):
2899 * NET_RX_SUCCESS: no congestion
2900 * NET_RX_DROP: packet was dropped
2901 */
2902int netif_receive_skb(struct sk_buff *skb)
2903{
2904#ifdef CONFIG_RPS
2905 struct rps_dev_flow voidflow, *rflow = &voidflow;
2906 int cpu, ret;
2907
2908 rcu_read_lock();
2909
2910 cpu = get_rps_cpu(skb->dev, skb, &rflow);
2911
2912 if (cpu >= 0) {
2913 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
2914 rcu_read_unlock();
2915 } else {
2916 rcu_read_unlock();
2917 ret = __netif_receive_skb(skb);
2918 }
2919
2920 return ret;
2921#else
2922 return __netif_receive_skb(skb);
2923#endif
2924}
2598 2925 EXPORT_SYMBOL(netif_receive_skb);
2599 2926 
2600 /* Network device is going away, flush any packets still pending */
2927 /* Network device is going away, flush any packets still pending
2928  * Called with irqs disabled.
2929  */
2601 2930 static void flush_backlog(void *arg)
2602 2931 {
2603 2932 	struct net_device *dev = arg;
2604 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
2933 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
2605 2934 	struct sk_buff *skb, *tmp;
2606 2935 
2607 	skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
2936 	rps_lock(sd);
2937 	skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp)
2608 2938 		if (skb->dev == dev) {
2609 			__skb_unlink(skb, &queue->input_pkt_queue);
2939 			__skb_unlink(skb, &sd->input_pkt_queue);
2610 2940 			kfree_skb(skb);
2941 			input_queue_head_incr(sd);
2611 2942 		}
2943 	rps_unlock(sd);
2612 2944 }
2613 2945 
2614 2946 static int napi_gro_complete(struct sk_buff *skb)
@@ -2914,24 +3246,27 @@ EXPORT_SYMBOL(napi_gro_frags);
2914 3246 static int process_backlog(struct napi_struct *napi, int quota)
2915 3247 {
2916 3248 	int work = 0;
2917 	struct softnet_data *queue = &__get_cpu_var(softnet_data);
2918 	unsigned long start_time = jiffies;
3249 	struct softnet_data *sd = &__get_cpu_var(softnet_data);
2919 3250 
2920 3251 	napi->weight = weight_p;
2921 3252 	do {
2922 3253 		struct sk_buff *skb;
2923 3254 
2924 3255 		local_irq_disable();
2925 		skb = __skb_dequeue(&queue->input_pkt_queue);
3256 		rps_lock(sd);
3257 		skb = __skb_dequeue(&sd->input_pkt_queue);
2926 3258 		if (!skb) {
2927 3259 			__napi_complete(napi);
3260 			rps_unlock(sd);
2928 3261 			local_irq_enable();
2929 3262 			break;
2930 3263 		}
3264 		input_queue_head_incr(sd);
3265 		rps_unlock(sd);
2931 3266 		local_irq_enable();
2932 3267 
2933 		netif_receive_skb(skb);
3268 		__netif_receive_skb(skb);
2934 	} while (++work < quota && jiffies == start_time);
3269 	} while (++work < quota);
2935 3270 
2936 3271 	return work;
2937 3272 }
@@ -3019,6 +3354,34 @@ void netif_napi_del(struct napi_struct *napi)
3019 3354 }
3020 3355 EXPORT_SYMBOL(netif_napi_del);
3021 3356 
3357/*
3358 * net_rps_action sends any pending IPI's for rps.
3359 * Note: called with local irq disabled, but exits with local irq enabled.
3360 */
3361static void net_rps_action_and_irq_disable(void)
3362{
3363#ifdef CONFIG_RPS
3364 struct softnet_data *sd = &__get_cpu_var(softnet_data);
3365 struct softnet_data *remsd = sd->rps_ipi_list;
3366
3367 if (remsd) {
3368 sd->rps_ipi_list = NULL;
3369
3370 local_irq_enable();
3371
3372 /* Send pending IPI's to kick RPS processing on remote cpus. */
3373 while (remsd) {
3374 struct softnet_data *next = remsd->rps_ipi_next;
3375
3376 if (cpu_online(remsd->cpu))
3377 __smp_call_function_single(remsd->cpu,
3378 &remsd->csd, 0);
3379 remsd = next;
3380 }
3381 } else
3382#endif
3383 local_irq_enable();
3384}
3022 3385 
3023 3386 static void net_rx_action(struct softirq_action *h)
3024 3387 {
@@ -3088,7 +3451,7 @@ static void net_rx_action(struct softirq_action *h)
3088 3451 		netpoll_poll_unlock(have);
3089 3452 	}
3090 3453 out:
3091 	local_irq_enable();
3454 	net_rps_action_and_irq_disable();
3092 3455 
3093 3456 #ifdef CONFIG_NET_DMA
3094 3457 	/*
@@ -3334,10 +3697,10 @@ static int softnet_seq_show(struct seq_file *seq, void *v)
3334 3697 {
3335 3698 	struct netif_rx_stats *s = v;
3336 3699 
3337 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3700 	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
3338 3701 		   s->total, s->dropped, s->time_squeeze, 0,
3339 3702 		   0, 0, 0, 0, /* was fastroute */
3340 		   s->cpu_collision);
3703 		   s->cpu_collision, s->received_rps);
3341 3704 	return 0;
3342 3705 }
3343 3706 
@@ -3560,11 +3923,10 @@ int netdev_set_master(struct net_device *slave, struct net_device *master)
3560 3923 
3561 3924 	slave->master = master;
3562 3925 
3563 	synchronize_net();
3564 
3565 	if (old)
3926 	if (old) {
3927 		synchronize_net();
3566 3928 		dev_put(old);
3929 	}
3567 
3568 3930 	if (master)
3569 3931 		slave->flags |= IFF_SLAVE;
3570 3932 	else
@@ -3741,562 +4103,6 @@ void dev_set_rx_mode(struct net_device *dev)
3741 4103 	netif_addr_unlock_bh(dev);
3742 4104 }
3743 4105
3744/* hw addresses list handling functions */
3745
3746static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
3747 int addr_len, unsigned char addr_type)
3748{
3749 struct netdev_hw_addr *ha;
3750 int alloc_size;
3751
3752 if (addr_len > MAX_ADDR_LEN)
3753 return -EINVAL;
3754
3755 list_for_each_entry(ha, &list->list, list) {
3756 if (!memcmp(ha->addr, addr, addr_len) &&
3757 ha->type == addr_type) {
3758 ha->refcount++;
3759 return 0;
3760 }
3761 }
3762
3763
3764 alloc_size = sizeof(*ha);
3765 if (alloc_size < L1_CACHE_BYTES)
3766 alloc_size = L1_CACHE_BYTES;
3767 ha = kmalloc(alloc_size, GFP_ATOMIC);
3768 if (!ha)
3769 return -ENOMEM;
3770 memcpy(ha->addr, addr, addr_len);
3771 ha->type = addr_type;
3772 ha->refcount = 1;
3773 ha->synced = false;
3774 list_add_tail_rcu(&ha->list, &list->list);
3775 list->count++;
3776 return 0;
3777}
3778
3779static void ha_rcu_free(struct rcu_head *head)
3780{
3781 struct netdev_hw_addr *ha;
3782
3783 ha = container_of(head, struct netdev_hw_addr, rcu_head);
3784 kfree(ha);
3785}
3786
3787static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
3788 int addr_len, unsigned char addr_type)
3789{
3790 struct netdev_hw_addr *ha;
3791
3792 list_for_each_entry(ha, &list->list, list) {
3793 if (!memcmp(ha->addr, addr, addr_len) &&
3794 (ha->type == addr_type || !addr_type)) {
3795 if (--ha->refcount)
3796 return 0;
3797 list_del_rcu(&ha->list);
3798 call_rcu(&ha->rcu_head, ha_rcu_free);
3799 list->count--;
3800 return 0;
3801 }
3802 }
3803 return -ENOENT;
3804}
3805
3806static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
3807 struct netdev_hw_addr_list *from_list,
3808 int addr_len,
3809 unsigned char addr_type)
3810{
3811 int err;
3812 struct netdev_hw_addr *ha, *ha2;
3813 unsigned char type;
3814
3815 list_for_each_entry(ha, &from_list->list, list) {
3816 type = addr_type ? addr_type : ha->type;
3817 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
3818 if (err)
3819 goto unroll;
3820 }
3821 return 0;
3822
3823unroll:
3824 list_for_each_entry(ha2, &from_list->list, list) {
3825 if (ha2 == ha)
3826 break;
3827 type = addr_type ? addr_type : ha2->type;
3828 __hw_addr_del(to_list, ha2->addr, addr_len, type);
3829 }
3830 return err;
3831}
3832
3833static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
3834 struct netdev_hw_addr_list *from_list,
3835 int addr_len,
3836 unsigned char addr_type)
3837{
3838 struct netdev_hw_addr *ha;
3839 unsigned char type;
3840
3841 list_for_each_entry(ha, &from_list->list, list) {
3842 type = addr_type ? addr_type : ha->type;
3843 __hw_addr_del(to_list, ha->addr, addr_len, addr_type);
3844 }
3845}
3846
3847static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
3848 struct netdev_hw_addr_list *from_list,
3849 int addr_len)
3850{
3851 int err = 0;
3852 struct netdev_hw_addr *ha, *tmp;
3853
3854 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3855 if (!ha->synced) {
3856 err = __hw_addr_add(to_list, ha->addr,
3857 addr_len, ha->type);
3858 if (err)
3859 break;
3860 ha->synced = true;
3861 ha->refcount++;
3862 } else if (ha->refcount == 1) {
3863 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
3864 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
3865 }
3866 }
3867 return err;
3868}
3869
3870static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
3871 struct netdev_hw_addr_list *from_list,
3872 int addr_len)
3873{
3874 struct netdev_hw_addr *ha, *tmp;
3875
3876 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
3877 if (ha->synced) {
3878 __hw_addr_del(to_list, ha->addr,
3879 addr_len, ha->type);
3880 ha->synced = false;
3881 __hw_addr_del(from_list, ha->addr,
3882 addr_len, ha->type);
3883 }
3884 }
3885}
3886
3887static void __hw_addr_flush(struct netdev_hw_addr_list *list)
3888{
3889 struct netdev_hw_addr *ha, *tmp;
3890
3891 list_for_each_entry_safe(ha, tmp, &list->list, list) {
3892 list_del_rcu(&ha->list);
3893 call_rcu(&ha->rcu_head, ha_rcu_free);
3894 }
3895 list->count = 0;
3896}
3897
3898static void __hw_addr_init(struct netdev_hw_addr_list *list)
3899{
3900 INIT_LIST_HEAD(&list->list);
3901 list->count = 0;
3902}
3903
3904/* Device addresses handling functions */
3905
3906static void dev_addr_flush(struct net_device *dev)
3907{
3908 /* rtnl_mutex must be held here */
3909
3910 __hw_addr_flush(&dev->dev_addrs);
3911 dev->dev_addr = NULL;
3912}
3913
3914static int dev_addr_init(struct net_device *dev)
3915{
3916 unsigned char addr[MAX_ADDR_LEN];
3917 struct netdev_hw_addr *ha;
3918 int err;
3919
3920 /* rtnl_mutex must be held here */
3921
3922 __hw_addr_init(&dev->dev_addrs);
3923 memset(addr, 0, sizeof(addr));
3924 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
3925 NETDEV_HW_ADDR_T_LAN);
3926 if (!err) {
3927 /*
3928 * Get the first (previously created) address from the list
3929 * and set dev_addr pointer to this location.
3930 */
3931 ha = list_first_entry(&dev->dev_addrs.list,
3932 struct netdev_hw_addr, list);
3933 dev->dev_addr = ha->addr;
3934 }
3935 return err;
3936}
3937
3938/**
3939 * dev_addr_add - Add a device address
3940 * @dev: device
3941 * @addr: address to add
3942 * @addr_type: address type
3943 *
3944 * Add a device address to the device or increase the reference count if
3945 * it already exists.
3946 *
3947 * The caller must hold the rtnl_mutex.
3948 */
3949int dev_addr_add(struct net_device *dev, unsigned char *addr,
3950 unsigned char addr_type)
3951{
3952 int err;
3953
3954 ASSERT_RTNL();
3955
3956 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
3957 if (!err)
3958 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3959 return err;
3960}
3961EXPORT_SYMBOL(dev_addr_add);
3962
3963/**
3964 * dev_addr_del - Release a device address.
3965 * @dev: device
3966 * @addr: address to delete
3967 * @addr_type: address type
3968 *
3969 * Release reference to a device address and remove it from the device
3970 * if the reference count drops to zero.
3971 *
3972 * The caller must hold the rtnl_mutex.
3973 */
3974int dev_addr_del(struct net_device *dev, unsigned char *addr,
3975 unsigned char addr_type)
3976{
3977 int err;
3978 struct netdev_hw_addr *ha;
3979
3980 ASSERT_RTNL();
3981
3982 /*
3983 * We can not remove the first address from the list because
3984 * dev->dev_addr points to that.
3985 */
3986 ha = list_first_entry(&dev->dev_addrs.list,
3987 struct netdev_hw_addr, list);
3988 if (ha->addr == dev->dev_addr && ha->refcount == 1)
3989 return -ENOENT;
3990
3991 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
3992 addr_type);
3993 if (!err)
3994 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
3995 return err;
3996}
3997EXPORT_SYMBOL(dev_addr_del);
3998
3999/**
4000 * dev_addr_add_multiple - Add device addresses from another device
4001 * @to_dev: device to which addresses will be added
4002 * @from_dev: device from which addresses will be added
4003 * @addr_type: address type - 0 means type will be used from from_dev
4004 *
 4005 * Add device addresses of one device to another.
 4006 *
4007 * The caller must hold the rtnl_mutex.
4008 */
4009int dev_addr_add_multiple(struct net_device *to_dev,
4010 struct net_device *from_dev,
4011 unsigned char addr_type)
4012{
4013 int err;
4014
4015 ASSERT_RTNL();
4016
4017 if (from_dev->addr_len != to_dev->addr_len)
4018 return -EINVAL;
4019 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
4020 to_dev->addr_len, addr_type);
4021 if (!err)
4022 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4023 return err;
4024}
4025EXPORT_SYMBOL(dev_addr_add_multiple);
4026
4027/**
4028 * dev_addr_del_multiple - Delete device addresses by another device
4029 * @to_dev: device where the addresses will be deleted
 4030 * @from_dev: device whose addresses will be deleted from @to_dev
 4031 * @addr_type: address type - 0 means the type will be taken from from_dev
 4032 *
 4033 * Deletes the addresses in @to_dev that appear on @from_dev's list.
4034 *
4035 * The caller must hold the rtnl_mutex.
4036 */
4037int dev_addr_del_multiple(struct net_device *to_dev,
4038 struct net_device *from_dev,
4039 unsigned char addr_type)
4040{
4041 ASSERT_RTNL();
4042
4043 if (from_dev->addr_len != to_dev->addr_len)
4044 return -EINVAL;
4045 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
4046 to_dev->addr_len, addr_type);
4047 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
4048 return 0;
4049}
4050EXPORT_SYMBOL(dev_addr_del_multiple);
4051
4052/* multicast addresses handling functions */
4053
4054int __dev_addr_delete(struct dev_addr_list **list, int *count,
4055 void *addr, int alen, int glbl)
4056{
4057 struct dev_addr_list *da;
4058
4059 for (; (da = *list) != NULL; list = &da->next) {
4060 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4061 alen == da->da_addrlen) {
4062 if (glbl) {
4063 int old_glbl = da->da_gusers;
4064 da->da_gusers = 0;
4065 if (old_glbl == 0)
4066 break;
4067 }
4068 if (--da->da_users)
4069 return 0;
4070
4071 *list = da->next;
4072 kfree(da);
4073 (*count)--;
4074 return 0;
4075 }
4076 }
4077 return -ENOENT;
4078}
4079
4080int __dev_addr_add(struct dev_addr_list **list, int *count,
4081 void *addr, int alen, int glbl)
4082{
4083 struct dev_addr_list *da;
4084
4085 for (da = *list; da != NULL; da = da->next) {
4086 if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
4087 da->da_addrlen == alen) {
4088 if (glbl) {
4089 int old_glbl = da->da_gusers;
4090 da->da_gusers = 1;
4091 if (old_glbl)
4092 return 0;
4093 }
4094 da->da_users++;
4095 return 0;
4096 }
4097 }
4098
4099 da = kzalloc(sizeof(*da), GFP_ATOMIC);
4100 if (da == NULL)
4101 return -ENOMEM;
4102 memcpy(da->da_addr, addr, alen);
4103 da->da_addrlen = alen;
4104 da->da_users = 1;
4105 da->da_gusers = glbl ? 1 : 0;
4106 da->next = *list;
4107 *list = da;
4108 (*count)++;
4109 return 0;
4110}
4111
4112/**
4113 * dev_unicast_delete - Release secondary unicast address.
4114 * @dev: device
4115 * @addr: address to delete
4116 *
4117 * Release reference to a secondary unicast address and remove it
4118 * from the device if the reference count drops to zero.
4119 *
4120 * The caller must hold the rtnl_mutex.
4121 */
4122int dev_unicast_delete(struct net_device *dev, void *addr)
4123{
4124 int err;
4125
4126 ASSERT_RTNL();
4127
4128 netif_addr_lock_bh(dev);
4129 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
4130 NETDEV_HW_ADDR_T_UNICAST);
4131 if (!err)
4132 __dev_set_rx_mode(dev);
4133 netif_addr_unlock_bh(dev);
4134 return err;
4135}
4136EXPORT_SYMBOL(dev_unicast_delete);
4137
4138/**
4139 * dev_unicast_add - add a secondary unicast address
4140 * @dev: device
4141 * @addr: address to add
4142 *
4143 * Add a secondary unicast address to the device or increase
4144 * the reference count if it already exists.
4145 *
4146 * The caller must hold the rtnl_mutex.
4147 */
4148int dev_unicast_add(struct net_device *dev, void *addr)
4149{
4150 int err;
4151
4152 ASSERT_RTNL();
4153
4154 netif_addr_lock_bh(dev);
4155 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
4156 NETDEV_HW_ADDR_T_UNICAST);
4157 if (!err)
4158 __dev_set_rx_mode(dev);
4159 netif_addr_unlock_bh(dev);
4160 return err;
4161}
4162EXPORT_SYMBOL(dev_unicast_add);
4163
4164int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
4165 struct dev_addr_list **from, int *from_count)
4166{
4167 struct dev_addr_list *da, *next;
4168 int err = 0;
4169
4170 da = *from;
4171 while (da != NULL) {
4172 next = da->next;
4173 if (!da->da_synced) {
4174 err = __dev_addr_add(to, to_count,
4175 da->da_addr, da->da_addrlen, 0);
4176 if (err < 0)
4177 break;
4178 da->da_synced = 1;
4179 da->da_users++;
4180 } else if (da->da_users == 1) {
4181 __dev_addr_delete(to, to_count,
4182 da->da_addr, da->da_addrlen, 0);
4183 __dev_addr_delete(from, from_count,
4184 da->da_addr, da->da_addrlen, 0);
4185 }
4186 da = next;
4187 }
4188 return err;
4189}
4190EXPORT_SYMBOL_GPL(__dev_addr_sync);
4191
4192void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
4193 struct dev_addr_list **from, int *from_count)
4194{
4195 struct dev_addr_list *da, *next;
4196
4197 da = *from;
4198 while (da != NULL) {
4199 next = da->next;
4200 if (da->da_synced) {
4201 __dev_addr_delete(to, to_count,
4202 da->da_addr, da->da_addrlen, 0);
4203 da->da_synced = 0;
4204 __dev_addr_delete(from, from_count,
4205 da->da_addr, da->da_addrlen, 0);
4206 }
4207 da = next;
4208 }
4209}
4210EXPORT_SYMBOL_GPL(__dev_addr_unsync);
4211
4212/**
4213 * dev_unicast_sync - Synchronize device's unicast list to another device
4214 * @to: destination device
4215 * @from: source device
4216 *
4217 * Add newly added addresses to the destination device and release
4218 * addresses that have no users left. The source device must be
4219 * locked by netif_tx_lock_bh.
4220 *
4221 * This function is intended to be called from the dev->set_rx_mode
4222 * function of layered software devices.
4223 */
4224int dev_unicast_sync(struct net_device *to, struct net_device *from)
4225{
4226 int err = 0;
4227
4228 if (to->addr_len != from->addr_len)
4229 return -EINVAL;
4230
4231 netif_addr_lock_bh(to);
4232 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
4233 if (!err)
4234 __dev_set_rx_mode(to);
4235 netif_addr_unlock_bh(to);
4236 return err;
4237}
4238EXPORT_SYMBOL(dev_unicast_sync);
4239
4240/**
4241 * dev_unicast_unsync - Remove synchronized addresses from the destination device
4242 * @to: destination device
4243 * @from: source device
4244 *
4245 * Remove all addresses that were added to the destination device by
4246 * dev_unicast_sync(). This function is intended to be called from the
4247 * dev->stop function of layered software devices.
4248 */
4249void dev_unicast_unsync(struct net_device *to, struct net_device *from)
4250{
4251 if (to->addr_len != from->addr_len)
4252 return;
4253
4254 netif_addr_lock_bh(from);
4255 netif_addr_lock(to);
4256 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
4257 __dev_set_rx_mode(to);
4258 netif_addr_unlock(to);
4259 netif_addr_unlock_bh(from);
4260}
4261EXPORT_SYMBOL(dev_unicast_unsync);
4262
4263static void dev_unicast_flush(struct net_device *dev)
4264{
4265 netif_addr_lock_bh(dev);
4266 __hw_addr_flush(&dev->uc);
4267 netif_addr_unlock_bh(dev);
4268}
4269
4270static void dev_unicast_init(struct net_device *dev)
4271{
4272 __hw_addr_init(&dev->uc);
4273}
4274
4275
4276static void __dev_addr_discard(struct dev_addr_list **list)
4277{
4278 struct dev_addr_list *tmp;
4279
4280 while (*list != NULL) {
4281 tmp = *list;
4282 *list = tmp->next;
4283 if (tmp->da_users > tmp->da_gusers)
4284 printk("__dev_addr_discard: address leakage! "
4285 "da_users=%d\n", tmp->da_users);
4286 kfree(tmp);
4287 }
4288}
4289
4290static void dev_addr_discard(struct net_device *dev)
4291{
4292 netif_addr_lock_bh(dev);
4293
4294 __dev_addr_discard(&dev->mc_list);
4295 netdev_mc_count(dev) = 0;
4296
4297 netif_addr_unlock_bh(dev);
4298}
4299
4300 4106 /**
4301 4107  * dev_get_flags - get flags reported to userspace
4302 4108  * @dev: device
@@ -4607,8 +4413,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4607 4413			return -EINVAL;
4608 4414		if (!netif_device_present(dev))
4609 4415			return -ENODEV;
-4610		return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
-4611				  dev->addr_len, 1);
+4416		return dev_mc_add_global(dev, ifr->ifr_hwaddr.sa_data);
4612 4417
4613 4418	case SIOCDELMULTI:
4614 4419		if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
@@ -4616,8 +4421,7 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
4616 4421			return -EINVAL;
4617 4422		if (!netif_device_present(dev))
4618 4423			return -ENODEV;
-4619		return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
-4620				     dev->addr_len, 1);
+4424		return dev_mc_del_global(dev, ifr->ifr_hwaddr.sa_data);
4621 4425
4622 4426	case SIOCSIFTXQLEN:
4623 4427		if (ifr->ifr_qlen < 0)
@@ -4924,8 +4728,8 @@ static void rollback_registered_many(struct list_head *head)
4924 4728	/*
4925 4729	 * Flush the unicast and multicast chains
4926 4730	 */
-4927	dev_unicast_flush(dev);
-4928	dev_addr_discard(dev);
+4731	dev_uc_flush(dev);
+4732	dev_mc_flush(dev);
4929 4733
4930 4734	if (dev->netdev_ops->ndo_uninit)
4931 4735		dev->netdev_ops->ndo_uninit(dev);
@@ -5074,6 +4878,24 @@ int register_netdevice(struct net_device *dev)
5074 4878
5075 4879	dev->iflink = -1;
5076 4880
+4881#ifdef CONFIG_RPS
+4882	if (!dev->num_rx_queues) {
+4883		/*
+4884		 * Allocate a single RX queue if driver never called
+4885		 * alloc_netdev_mq
+4886		 */
+4887
+4888		dev->_rx = kzalloc(sizeof(struct netdev_rx_queue), GFP_KERNEL);
+4889		if (!dev->_rx) {
+4890			ret = -ENOMEM;
+4891			goto out;
+4892		}
+4893
+4894		dev->_rx->first = dev->_rx;
+4895		atomic_set(&dev->_rx->count, 1);
+4896		dev->num_rx_queues = 1;
+4897	}
+4898#endif
5077 4899	/* Init, if this function is available */
5078 4900	if (dev->netdev_ops->ndo_init) {
5079 4901		ret = dev->netdev_ops->ndo_init(dev);
@@ -5434,6 +5256,10 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5434 5256	struct net_device *dev;
5435 5257	size_t alloc_size;
5436 5258	struct net_device *p;
+5259#ifdef CONFIG_RPS
+5260	struct netdev_rx_queue *rx;
+5261	int i;
+5262#endif
5437 5263
5438 5264	BUG_ON(strlen(name) >= sizeof(dev->name));
5439 5265
@@ -5459,13 +5285,32 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5459 5285		goto free_p;
5460 5286	}
5461 5287
+5288#ifdef CONFIG_RPS
+5289	rx = kcalloc(queue_count, sizeof(struct netdev_rx_queue), GFP_KERNEL);
+5290	if (!rx) {
+5291		printk(KERN_ERR "alloc_netdev: Unable to allocate "
+5292		       "rx queues.\n");
+5293		goto free_tx;
+5294	}
+5295
+5296	atomic_set(&rx->count, queue_count);
+5297
+5298	/*
+5299	 * Set a pointer to first element in the array which holds the
+5300	 * reference count.
+5301	 */
+5302	for (i = 0; i < queue_count; i++)
+5303		rx[i].first = rx;
+5304#endif
+5305
5462 5306	dev = PTR_ALIGN(p, NETDEV_ALIGN);
5463 5307	dev->padded = (char *)dev - (char *)p;
5464 5308
5465 5309	if (dev_addr_init(dev))
-5466		goto free_tx;
+5310		goto free_rx;
5467 5311
-5468	dev_unicast_init(dev);
+5312	dev_mc_init(dev);
+5313	dev_uc_init(dev);
5469 5314
5470 5315	dev_net_set(dev, &init_net);
5471 5316
@@ -5473,6 +5318,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5473 5318	dev->num_tx_queues = queue_count;
5474 5319	dev->real_num_tx_queues = queue_count;
5475 5320
+5321#ifdef CONFIG_RPS
+5322	dev->_rx = rx;
+5323	dev->num_rx_queues = queue_count;
+5324#endif
+5325
5476 5326	dev->gso_max_size = GSO_MAX_SIZE;
5477 5327
5478 5328	netdev_init_queues(dev);
@@ -5487,9 +5337,12 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
5487 5337	strcpy(dev->name, name);
5488 5338	return dev;
5489 5339
+5340free_rx:
+5341#ifdef CONFIG_RPS
+5342	kfree(rx);
5490 5343 free_tx:
+5344#endif
5491 5345	kfree(tx);
-5492
5493 5346 free_p:
5494 5347	kfree(p);
5495 5348	return NULL;
@@ -5691,8 +5544,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
5691 5544	/*
5692 5545	 * Flush the unicast and multicast chains
5693 5546	 */
-5694	dev_unicast_flush(dev);
-5695	dev_addr_discard(dev);
+5547	dev_uc_flush(dev);
+5548	dev_mc_flush(dev);
5696 5549
5697 5550	netdev_unregister_kobject(dev);
5698 5551
@@ -5768,8 +5621,10 @@ static int dev_cpu_callback(struct notifier_block *nfb,
5768 5621	local_irq_enable();
5769 5622
5770 5623	/* Process offline CPU's input_pkt_queue */
-5771	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
+5624	while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
5772 5625		netif_rx(skb);
+5626		input_queue_head_incr(oldsd);
+5627	}
5773 5628
5774 5629	return NOTIFY_OK;
5775 5630 }
@@ -5985,17 +5840,23 @@ static int __init net_dev_init(void)
5985 5840	 */
5986 5841
5987 5842	for_each_possible_cpu(i) {
-5988		struct softnet_data *queue;
-5989
-5990		queue = &per_cpu(softnet_data, i);
-5991		skb_queue_head_init(&queue->input_pkt_queue);
-5992		queue->completion_queue = NULL;
-5993		INIT_LIST_HEAD(&queue->poll_list);
+5843		struct softnet_data *sd = &per_cpu(softnet_data, i);
+5844
+5845		skb_queue_head_init(&sd->input_pkt_queue);
+5846		sd->completion_queue = NULL;
+5847		INIT_LIST_HEAD(&sd->poll_list);
+5848
+5849#ifdef CONFIG_RPS
+5850		sd->csd.func = rps_trigger_softirq;
+5851		sd->csd.info = sd;
+5852		sd->csd.flags = 0;
+5853		sd->cpu = i;
+5854#endif
5994 5855
-5995		queue->backlog.poll = process_backlog;
-5996		queue->backlog.weight = weight_p;
-5997		queue->backlog.gro_list = NULL;
-5998		queue->backlog.gro_count = 0;
+5856		sd->backlog.poll = process_backlog;
+5857		sd->backlog.weight = weight_p;
+5858		sd->backlog.gro_list = NULL;
+5859		sd->backlog.gro_count = 0;
5999 5860	}
6000 5861
6001 5862	dev_boot_phase = 0;
@@ -6030,7 +5891,7 @@ subsys_initcall(net_dev_init);
6030 5891
6031 5892 static int __init initialize_hashrnd(void)
6032 5893 {
-6033	get_random_bytes(&skb_tx_hashrnd, sizeof(skb_tx_hashrnd));
+5894	get_random_bytes(&hashrnd, sizeof(hashrnd));
6034 5895	return 0;
6035 5896 }
6036 5897
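The netdev_rx_queue arrays introduced in the register_netdevice() and alloc_netdev_mq() hunks above are released through their first element: every queue points back at element zero via ->first, and element zero carries the reference count for the whole kcalloc'ed array. A hedged sketch of the release side this layout implies (the helper name is invented; the real put lives outside these hunks):

static void example_put_rx_queue(struct netdev_rx_queue *queue)
{
	struct netdev_rx_queue *first = queue->first;

	/* element zero owns the array; free it when the last
	 * queue's reference is dropped */
	if (atomic_dec_and_test(&first->count))
		kfree(first);
}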
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
new file mode 100644
index 000000000000..508f9c18992f
--- /dev/null
+++ b/net/core/dev_addr_lists.c
@@ -0,0 +1,741 @@
1/*
2 * net/core/dev_addr_lists.c - Functions for handling net device lists
3 * Copyright (c) 2010 Jiri Pirko <jpirko@redhat.com>
4 *
5 * This file contains functions for working with unicast, multicast and device
 6 * address lists.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14#include <linux/netdevice.h>
15#include <linux/rtnetlink.h>
16#include <linux/list.h>
17#include <linux/proc_fs.h>
18
19/*
20 * General list handling functions
21 */
22
23static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
24 unsigned char *addr, int addr_len,
25 unsigned char addr_type, bool global)
26{
27 struct netdev_hw_addr *ha;
28 int alloc_size;
29
30 if (addr_len > MAX_ADDR_LEN)
31 return -EINVAL;
32
33 list_for_each_entry(ha, &list->list, list) {
34 if (!memcmp(ha->addr, addr, addr_len) &&
35 ha->type == addr_type) {
36 if (global) {
37 /* check if addr is already used as global */
38 if (ha->global_use)
39 return 0;
40 else
41 ha->global_use = true;
42 }
43 ha->refcount++;
44 return 0;
45 }
46 }
47
48
49 alloc_size = sizeof(*ha);
50 if (alloc_size < L1_CACHE_BYTES)
51 alloc_size = L1_CACHE_BYTES;
52 ha = kmalloc(alloc_size, GFP_ATOMIC);
53 if (!ha)
54 return -ENOMEM;
55 memcpy(ha->addr, addr, addr_len);
56 ha->type = addr_type;
57 ha->refcount = 1;
58 ha->global_use = global;
59 ha->synced = false;
60 list_add_tail_rcu(&ha->list, &list->list);
61 list->count++;
62 return 0;
63}
64
65static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
66 int addr_len, unsigned char addr_type)
67{
68 return __hw_addr_add_ex(list, addr, addr_len, addr_type, false);
69}
70
71static void ha_rcu_free(struct rcu_head *head)
72{
73 struct netdev_hw_addr *ha;
74
75 ha = container_of(head, struct netdev_hw_addr, rcu_head);
76 kfree(ha);
77}
78
79static int __hw_addr_del_ex(struct netdev_hw_addr_list *list,
80 unsigned char *addr, int addr_len,
81 unsigned char addr_type, bool global)
82{
83 struct netdev_hw_addr *ha;
84
85 list_for_each_entry(ha, &list->list, list) {
86 if (!memcmp(ha->addr, addr, addr_len) &&
87 (ha->type == addr_type || !addr_type)) {
88 if (global) {
89 if (!ha->global_use)
90 break;
91 else
92 ha->global_use = false;
93 }
94 if (--ha->refcount)
95 return 0;
96 list_del_rcu(&ha->list);
97 call_rcu(&ha->rcu_head, ha_rcu_free);
98 list->count--;
99 return 0;
100 }
101 }
102 return -ENOENT;
103}
104
105static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
106 int addr_len, unsigned char addr_type)
107{
108 return __hw_addr_del_ex(list, addr, addr_len, addr_type, false);
109}
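Both helpers are reference counted on the (address, type) pair: a duplicate add only bumps ha->refcount, and only the delete that drops the count to zero unlinks the entry. An illustrative, test-style sketch (hypothetical; it would have to live in this file since the helpers are static):

static void hw_addr_refcount_demo(void)
{
	struct netdev_hw_addr_list demo_list;
	unsigned char mac[MAX_ADDR_LEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

	__hw_addr_init(&demo_list);
	__hw_addr_add(&demo_list, mac, ETH_ALEN, NETDEV_HW_ADDR_T_UNICAST);
	__hw_addr_add(&demo_list, mac, ETH_ALEN, NETDEV_HW_ADDR_T_UNICAST);
	/* one entry: demo_list.count == 1, ha->refcount == 2 */
	__hw_addr_del(&demo_list, mac, ETH_ALEN, NETDEV_HW_ADDR_T_UNICAST);
	/* entry survives with ha->refcount == 1 */
	__hw_addr_del(&demo_list, mac, ETH_ALEN, NETDEV_HW_ADDR_T_UNICAST);
	/* entry freed (via RCU): demo_list.count == 0 */
}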
110
111int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
112 struct netdev_hw_addr_list *from_list,
113 int addr_len, unsigned char addr_type)
114{
115 int err;
116 struct netdev_hw_addr *ha, *ha2;
117 unsigned char type;
118
119 list_for_each_entry(ha, &from_list->list, list) {
120 type = addr_type ? addr_type : ha->type;
121 err = __hw_addr_add(to_list, ha->addr, addr_len, type);
122 if (err)
123 goto unroll;
124 }
125 return 0;
126
127unroll:
128 list_for_each_entry(ha2, &from_list->list, list) {
129 if (ha2 == ha)
130 break;
131 type = addr_type ? addr_type : ha2->type;
132 __hw_addr_del(to_list, ha2->addr, addr_len, type);
133 }
134 return err;
135}
136EXPORT_SYMBOL(__hw_addr_add_multiple);
137
138void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
139 struct netdev_hw_addr_list *from_list,
140 int addr_len, unsigned char addr_type)
141{
142 struct netdev_hw_addr *ha;
143 unsigned char type;
144
145 list_for_each_entry(ha, &from_list->list, list) {
146 type = addr_type ? addr_type : ha->type;
 147		__hw_addr_del(to_list, ha->addr, addr_len, type);
148 }
149}
150EXPORT_SYMBOL(__hw_addr_del_multiple);
151
152int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
153 struct netdev_hw_addr_list *from_list,
154 int addr_len)
155{
156 int err = 0;
157 struct netdev_hw_addr *ha, *tmp;
158
159 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
160 if (!ha->synced) {
161 err = __hw_addr_add(to_list, ha->addr,
162 addr_len, ha->type);
163 if (err)
164 break;
165 ha->synced = true;
166 ha->refcount++;
167 } else if (ha->refcount == 1) {
168 __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
169 __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
170 }
171 }
172 return err;
173}
174EXPORT_SYMBOL(__hw_addr_sync);
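__hw_addr_sync() leaves bookkeeping on the source entry: it marks it ha->synced and takes one extra reference, so a source entry whose count falls back to 1 has no real users left and is reaped from both lists on the next pass. A hypothetical in-file sketch of that life cycle:

static void hw_addr_sync_demo(struct netdev_hw_addr_list *uplink,
			      struct netdev_hw_addr_list *upper)
{
	unsigned char mac[MAX_ADDR_LEN] = { 0x00, 0xaa, 0xbb, 0xcc, 0xdd, 0xee };

	__hw_addr_add(upper, mac, ETH_ALEN, NETDEV_HW_ADDR_T_UNICAST);
	__hw_addr_sync(uplink, upper, ETH_ALEN);
	/* mac now on both lists; the upper entry is ha->synced and
	 * holds one extra reference for the sync */
	__hw_addr_del(upper, mac, ETH_ALEN, NETDEV_HW_ADDR_T_UNICAST);
	/* last real user gone: only the sync reference remains */
	__hw_addr_sync(uplink, upper, ETH_ALEN);
	/* this pass reaps mac from both uplink and upper */
}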
175
176void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
177 struct netdev_hw_addr_list *from_list,
178 int addr_len)
179{
180 struct netdev_hw_addr *ha, *tmp;
181
182 list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
183 if (ha->synced) {
184 __hw_addr_del(to_list, ha->addr,
185 addr_len, ha->type);
186 ha->synced = false;
187 __hw_addr_del(from_list, ha->addr,
188 addr_len, ha->type);
189 }
190 }
191}
192EXPORT_SYMBOL(__hw_addr_unsync);
193
194void __hw_addr_flush(struct netdev_hw_addr_list *list)
195{
196 struct netdev_hw_addr *ha, *tmp;
197
198 list_for_each_entry_safe(ha, tmp, &list->list, list) {
199 list_del_rcu(&ha->list);
200 call_rcu(&ha->rcu_head, ha_rcu_free);
201 }
202 list->count = 0;
203}
204EXPORT_SYMBOL(__hw_addr_flush);
205
206void __hw_addr_init(struct netdev_hw_addr_list *list)
207{
208 INIT_LIST_HEAD(&list->list);
209 list->count = 0;
210}
211EXPORT_SYMBOL(__hw_addr_init);
212
213/*
214 * Device addresses handling functions
215 */
216
217/**
218 * dev_addr_flush - Flush device address list
219 * @dev: device
220 *
221 * Flush device address list and reset ->dev_addr.
222 *
223 * The caller must hold the rtnl_mutex.
224 */
225void dev_addr_flush(struct net_device *dev)
226{
227 /* rtnl_mutex must be held here */
228
229 __hw_addr_flush(&dev->dev_addrs);
230 dev->dev_addr = NULL;
231}
232EXPORT_SYMBOL(dev_addr_flush);
233
234/**
235 * dev_addr_init - Init device address list
236 * @dev: device
237 *
238 * Init device address list and create the first element,
239 * used by ->dev_addr.
240 *
241 * The caller must hold the rtnl_mutex.
242 */
243int dev_addr_init(struct net_device *dev)
244{
245 unsigned char addr[MAX_ADDR_LEN];
246 struct netdev_hw_addr *ha;
247 int err;
248
249 /* rtnl_mutex must be held here */
250
251 __hw_addr_init(&dev->dev_addrs);
252 memset(addr, 0, sizeof(addr));
253 err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
254 NETDEV_HW_ADDR_T_LAN);
255 if (!err) {
256 /*
257 * Get the first (previously created) address from the list
258 * and set dev_addr pointer to this location.
259 */
260 ha = list_first_entry(&dev->dev_addrs.list,
261 struct netdev_hw_addr, list);
262 dev->dev_addr = ha->addr;
263 }
264 return err;
265}
266EXPORT_SYMBOL(dev_addr_init);
267
268/**
269 * dev_addr_add - Add a device address
270 * @dev: device
271 * @addr: address to add
272 * @addr_type: address type
273 *
274 * Add a device address to the device or increase the reference count if
275 * it already exists.
276 *
277 * The caller must hold the rtnl_mutex.
278 */
279int dev_addr_add(struct net_device *dev, unsigned char *addr,
280 unsigned char addr_type)
281{
282 int err;
283
284 ASSERT_RTNL();
285
286 err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
287 if (!err)
288 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
289 return err;
290}
291EXPORT_SYMBOL(dev_addr_add);
292
293/**
294 * dev_addr_del - Release a device address.
295 * @dev: device
296 * @addr: address to delete
297 * @addr_type: address type
298 *
299 * Release reference to a device address and remove it from the device
300 * if the reference count drops to zero.
301 *
302 * The caller must hold the rtnl_mutex.
303 */
304int dev_addr_del(struct net_device *dev, unsigned char *addr,
305 unsigned char addr_type)
306{
307 int err;
308 struct netdev_hw_addr *ha;
309
310 ASSERT_RTNL();
311
312 /*
 313	 * We cannot remove the first address from the list because
 314	 * dev->dev_addr points to it.
315 */
316 ha = list_first_entry(&dev->dev_addrs.list,
317 struct netdev_hw_addr, list);
318 if (ha->addr == dev->dev_addr && ha->refcount == 1)
319 return -ENOENT;
320
321 err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
322 addr_type);
323 if (!err)
324 call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
325 return err;
326}
327EXPORT_SYMBOL(dev_addr_del);
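Taken together, the pair is meant to be driven under the RTNL. A hedged sketch of a driver registering a secondary SAN MAC address and dropping it again (the example_* function names are invented; NETDEV_HW_ADDR_T_SAN is one of the existing address types):

static int example_set_san_mac(struct net_device *dev, unsigned char *san_mac)
{
	int err;

	rtnl_lock();
	err = dev_addr_add(dev, san_mac, NETDEV_HW_ADDR_T_SAN);
	rtnl_unlock();
	return err;
}

static int example_clear_san_mac(struct net_device *dev, unsigned char *san_mac)
{
	int err;

	rtnl_lock();
	err = dev_addr_del(dev, san_mac, NETDEV_HW_ADDR_T_SAN);
	rtnl_unlock();
	return err;
}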
328
329/**
330 * dev_addr_add_multiple - Add device addresses from another device
331 * @to_dev: device to which addresses will be added
332 * @from_dev: device from which addresses will be added
333 * @addr_type: address type - 0 means type will be used from from_dev
334 *
 335 * Add device addresses of one device to another.
 336 *
337 * The caller must hold the rtnl_mutex.
338 */
339int dev_addr_add_multiple(struct net_device *to_dev,
340 struct net_device *from_dev,
341 unsigned char addr_type)
342{
343 int err;
344
345 ASSERT_RTNL();
346
347 if (from_dev->addr_len != to_dev->addr_len)
348 return -EINVAL;
349 err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
350 to_dev->addr_len, addr_type);
351 if (!err)
352 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
353 return err;
354}
355EXPORT_SYMBOL(dev_addr_add_multiple);
356
357/**
358 * dev_addr_del_multiple - Delete device addresses by another device
359 * @to_dev: device where the addresses will be deleted
 360 * @from_dev: device whose addresses will be deleted from @to_dev
 361 * @addr_type: address type - 0 means the type will be taken from from_dev
 362 *
 363 * Deletes the addresses in @to_dev that appear on @from_dev's list.
364 *
365 * The caller must hold the rtnl_mutex.
366 */
367int dev_addr_del_multiple(struct net_device *to_dev,
368 struct net_device *from_dev,
369 unsigned char addr_type)
370{
371 ASSERT_RTNL();
372
373 if (from_dev->addr_len != to_dev->addr_len)
374 return -EINVAL;
375 __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
376 to_dev->addr_len, addr_type);
377 call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
378 return 0;
379}
380EXPORT_SYMBOL(dev_addr_del_multiple);
381
382/*
383 * Unicast list handling functions
384 */
385
386/**
387 * dev_uc_add - Add a secondary unicast address
388 * @dev: device
389 * @addr: address to add
390 *
391 * Add a secondary unicast address to the device or increase
392 * the reference count if it already exists.
393 */
394int dev_uc_add(struct net_device *dev, unsigned char *addr)
395{
396 int err;
397
398 netif_addr_lock_bh(dev);
399 err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
400 NETDEV_HW_ADDR_T_UNICAST);
401 if (!err)
402 __dev_set_rx_mode(dev);
403 netif_addr_unlock_bh(dev);
404 return err;
405}
406EXPORT_SYMBOL(dev_uc_add);
407
408/**
409 * dev_uc_del - Release secondary unicast address.
410 * @dev: device
411 * @addr: address to delete
412 *
413 * Release reference to a secondary unicast address and remove it
414 * from the device if the reference count drops to zero.
415 */
416int dev_uc_del(struct net_device *dev, unsigned char *addr)
417{
418 int err;
419
420 netif_addr_lock_bh(dev);
421 err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
422 NETDEV_HW_ADDR_T_UNICAST);
423 if (!err)
424 __dev_set_rx_mode(dev);
425 netif_addr_unlock_bh(dev);
426 return err;
427}
428EXPORT_SYMBOL(dev_uc_del);
429
430/**
431 * dev_uc_sync - Synchronize device's unicast list to another device
432 * @to: destination device
433 * @from: source device
434 *
435 * Add newly added addresses to the destination device and release
436 * addresses that have no users left. The source device must be
 437 * locked by netif_addr_lock_bh.
438 *
439 * This function is intended to be called from the dev->set_rx_mode
440 * function of layered software devices.
441 */
442int dev_uc_sync(struct net_device *to, struct net_device *from)
443{
444 int err = 0;
445
446 if (to->addr_len != from->addr_len)
447 return -EINVAL;
448
449 netif_addr_lock_bh(to);
450 err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
451 if (!err)
452 __dev_set_rx_mode(to);
453 netif_addr_unlock_bh(to);
454 return err;
455}
456EXPORT_SYMBOL(dev_uc_sync);
457
458/**
459 * dev_uc_unsync - Remove synchronized addresses from the destination device
460 * @to: destination device
461 * @from: source device
462 *
463 * Remove all addresses that were added to the destination device by
464 * dev_uc_sync(). This function is intended to be called from the
465 * dev->stop function of layered software devices.
466 */
467void dev_uc_unsync(struct net_device *to, struct net_device *from)
468{
469 if (to->addr_len != from->addr_len)
470 return;
471
472 netif_addr_lock_bh(from);
473 netif_addr_lock(to);
474 __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
475 __dev_set_rx_mode(to);
476 netif_addr_unlock(to);
477 netif_addr_unlock_bh(from);
478}
479EXPORT_SYMBOL(dev_uc_unsync);
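The sync/unsync pair is what lets address lists propagate through stacked devices. A sketch of the intended wiring for a VLAN-like upper device ("example_priv" and "lowerdev" are assumptions, not an existing API):

struct example_priv {
	struct net_device *lowerdev;	/* underlying real device */
};

static void example_ndo_set_rx_mode(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	dev_mc_sync(priv->lowerdev, dev);
	dev_uc_sync(priv->lowerdev, dev);
}

static int example_ndo_stop(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	dev_mc_unsync(priv->lowerdev, dev);
	dev_uc_unsync(priv->lowerdev, dev);
	return 0;
}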
480
481/**
482 * dev_uc_flush - Flush unicast addresses
483 * @dev: device
484 *
485 * Flush unicast addresses.
486 */
487void dev_uc_flush(struct net_device *dev)
488{
489 netif_addr_lock_bh(dev);
490 __hw_addr_flush(&dev->uc);
491 netif_addr_unlock_bh(dev);
492}
493EXPORT_SYMBOL(dev_uc_flush);
494
495/**
 496 * dev_uc_init - Init unicast address list
497 * @dev: device
498 *
499 * Init unicast address list.
500 */
501void dev_uc_init(struct net_device *dev)
502{
503 __hw_addr_init(&dev->uc);
504}
505EXPORT_SYMBOL(dev_uc_init);
506
507/*
508 * Multicast list handling functions
509 */
510
511static int __dev_mc_add(struct net_device *dev, unsigned char *addr,
512 bool global)
513{
514 int err;
515
516 netif_addr_lock_bh(dev);
517 err = __hw_addr_add_ex(&dev->mc, addr, dev->addr_len,
518 NETDEV_HW_ADDR_T_MULTICAST, global);
519 if (!err)
520 __dev_set_rx_mode(dev);
521 netif_addr_unlock_bh(dev);
522 return err;
523}
524/**
525 * dev_mc_add - Add a multicast address
526 * @dev: device
527 * @addr: address to add
528 *
529 * Add a multicast address to the device or increase
530 * the reference count if it already exists.
531 */
532int dev_mc_add(struct net_device *dev, unsigned char *addr)
533{
534 return __dev_mc_add(dev, addr, false);
535}
536EXPORT_SYMBOL(dev_mc_add);
537
538/**
539 * dev_mc_add_global - Add a global multicast address
540 * @dev: device
541 * @addr: address to add
542 *
543 * Add a global multicast address to the device.
544 */
545int dev_mc_add_global(struct net_device *dev, unsigned char *addr)
546{
547 return __dev_mc_add(dev, addr, true);
548}
549EXPORT_SYMBOL(dev_mc_add_global);
550
551static int __dev_mc_del(struct net_device *dev, unsigned char *addr,
552 bool global)
553{
554 int err;
555
556 netif_addr_lock_bh(dev);
557 err = __hw_addr_del_ex(&dev->mc, addr, dev->addr_len,
558 NETDEV_HW_ADDR_T_MULTICAST, global);
559 if (!err)
560 __dev_set_rx_mode(dev);
561 netif_addr_unlock_bh(dev);
562 return err;
563}
564
565/**
566 * dev_mc_del - Delete a multicast address.
567 * @dev: device
568 * @addr: address to delete
569 *
570 * Release reference to a multicast address and remove it
571 * from the device if the reference count drops to zero.
572 */
573int dev_mc_del(struct net_device *dev, unsigned char *addr)
574{
575 return __dev_mc_del(dev, addr, false);
576}
577EXPORT_SYMBOL(dev_mc_del);
578
579/**
580 * dev_mc_del_global - Delete a global multicast address.
581 * @dev: device
582 * @addr: address to delete
583 *
584 * Release reference to a multicast address and remove it
585 * from the device if the reference count drops to zero.
586 */
587int dev_mc_del_global(struct net_device *dev, unsigned char *addr)
588{
589 return __dev_mc_del(dev, addr, true);
590}
591EXPORT_SYMBOL(dev_mc_del_global);
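The global variants back the SIOCADDMULTI/SIOCDELMULTI path shown earlier in this patch: a global reference is absorbing, so user space can join the same group repeatedly without stacking references. A small sketch (hypothetical helper):

static int example_global_join(struct net_device *dev, unsigned char *mc_addr)
{
	int err;

	err = dev_mc_add_global(dev, mc_addr);
	if (err)
		return err;
	/* a second global add of the same address is a no-op ... */
	err = dev_mc_add_global(dev, mc_addr);
	if (err)
		return err;
	/* ... so one global delete undoes the join completely */
	return dev_mc_del_global(dev, mc_addr);
}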
592
593/**
 594 * dev_mc_sync - Synchronize device's multicast list to another device
595 * @to: destination device
596 * @from: source device
597 *
598 * Add newly added addresses to the destination device and release
599 * addresses that have no users left. The source device must be
 600 * locked by netif_addr_lock_bh.
601 *
602 * This function is intended to be called from the dev->set_multicast_list
603 * or dev->set_rx_mode function of layered software devices.
604 */
605int dev_mc_sync(struct net_device *to, struct net_device *from)
606{
607 int err = 0;
608
609 if (to->addr_len != from->addr_len)
610 return -EINVAL;
611
612 netif_addr_lock_bh(to);
613 err = __hw_addr_sync(&to->mc, &from->mc, to->addr_len);
614 if (!err)
615 __dev_set_rx_mode(to);
616 netif_addr_unlock_bh(to);
617 return err;
618}
619EXPORT_SYMBOL(dev_mc_sync);
620
621/**
622 * dev_mc_unsync - Remove synchronized addresses from the destination device
623 * @to: destination device
624 * @from: source device
625 *
626 * Remove all addresses that were added to the destination device by
627 * dev_mc_sync(). This function is intended to be called from the
628 * dev->stop function of layered software devices.
629 */
630void dev_mc_unsync(struct net_device *to, struct net_device *from)
631{
632 if (to->addr_len != from->addr_len)
633 return;
634
635 netif_addr_lock_bh(from);
636 netif_addr_lock(to);
637 __hw_addr_unsync(&to->mc, &from->mc, to->addr_len);
638 __dev_set_rx_mode(to);
639 netif_addr_unlock(to);
640 netif_addr_unlock_bh(from);
641}
642EXPORT_SYMBOL(dev_mc_unsync);
643
644/**
645 * dev_mc_flush - Flush multicast addresses
646 * @dev: device
647 *
648 * Flush multicast addresses.
649 */
650void dev_mc_flush(struct net_device *dev)
651{
652 netif_addr_lock_bh(dev);
653 __hw_addr_flush(&dev->mc);
654 netif_addr_unlock_bh(dev);
655}
656EXPORT_SYMBOL(dev_mc_flush);
657
658/**
 659 * dev_mc_init - Init multicast address list
660 * @dev: device
661 *
662 * Init multicast address list.
663 */
664void dev_mc_init(struct net_device *dev)
665{
666 __hw_addr_init(&dev->mc);
667}
668EXPORT_SYMBOL(dev_mc_init);
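With dev->mc now a netdev_hw_addr_list, drivers walk it with netdev_for_each_mc_addr() exactly as dev_mc_seq_show() does below. A sketch of a driver programming its hardware filter from ndo_set_rx_mode (example_write_filter() is hypothetical; the core already holds netif_addr_lock_bh when invoking the ndo, so the list is stable here):

static void example_program_mc_filter(struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev)
		example_write_filter(dev, ha->addr);	/* hypothetical hw hook */
}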
669
670#ifdef CONFIG_PROC_FS
671#include <linux/seq_file.h>
672
673static int dev_mc_seq_show(struct seq_file *seq, void *v)
674{
675 struct netdev_hw_addr *ha;
676 struct net_device *dev = v;
677
678 if (v == SEQ_START_TOKEN)
679 return 0;
680
681 netif_addr_lock_bh(dev);
682 netdev_for_each_mc_addr(ha, dev) {
683 int i;
684
685 seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
686 dev->name, ha->refcount, ha->global_use);
687
688 for (i = 0; i < dev->addr_len; i++)
689 seq_printf(seq, "%02x", ha->addr[i]);
690
691 seq_putc(seq, '\n');
692 }
693 netif_addr_unlock_bh(dev);
694 return 0;
695}
696
697static const struct seq_operations dev_mc_seq_ops = {
698 .start = dev_seq_start,
699 .next = dev_seq_next,
700 .stop = dev_seq_stop,
701 .show = dev_mc_seq_show,
702};
703
704static int dev_mc_seq_open(struct inode *inode, struct file *file)
705{
706 return seq_open_net(inode, file, &dev_mc_seq_ops,
707 sizeof(struct seq_net_private));
708}
709
710static const struct file_operations dev_mc_seq_fops = {
711 .owner = THIS_MODULE,
712 .open = dev_mc_seq_open,
713 .read = seq_read,
714 .llseek = seq_lseek,
715 .release = seq_release_net,
716};
717
718#endif
719
720static int __net_init dev_mc_net_init(struct net *net)
721{
722 if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
723 return -ENOMEM;
724 return 0;
725}
726
727static void __net_exit dev_mc_net_exit(struct net *net)
728{
729 proc_net_remove(net, "dev_mcast");
730}
731
732static struct pernet_operations __net_initdata dev_mc_net_ops = {
733 .init = dev_mc_net_init,
734 .exit = dev_mc_net_exit,
735};
736
737void __init dev_mcast_init(void)
738{
739 register_pernet_subsys(&dev_mc_net_ops);
740}
741
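For reference, dev_mc_seq_show() above keeps the historical /proc/net/dev_mcast layout: ifindex, device name, refcount, global-use count, then the address in hex. An illustrative line (values invented) would look like:

2    eth0            1     0     01005e000001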
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
deleted file mode 100644
index 3dc295beb483..000000000000
--- a/net/core/dev_mcast.c
+++ /dev/null
@@ -1,232 +0,0 @@
1/*
2 * Linux NET3: Multicast List maintenance.
3 *
4 * Authors:
5 * Tim Kordas <tjk@nostromo.eeap.cwru.edu>
6 * Richard Underwood <richard@wuzz.demon.co.uk>
7 *
8 * Stir fried together from the IP multicast and CAP patches above
9 * Alan Cox <alan@lxorguk.ukuu.org.uk>
10 *
11 * Fixes:
12 * Alan Cox : Update the device on a real delete
13 * rather than any time but...
14 * Alan Cox : IFF_ALLMULTI support.
15 * Alan Cox : New format set_multicast_list() calls.
16 * Gleb Natapov : Remove dev_mc_lock.
17 *
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
22 */
23
24#include <linux/module.h>
25#include <asm/uaccess.h>
26#include <asm/system.h>
27#include <linux/bitops.h>
28#include <linux/types.h>
29#include <linux/kernel.h>
30#include <linux/string.h>
31#include <linux/mm.h>
32#include <linux/socket.h>
33#include <linux/sockios.h>
34#include <linux/in.h>
35#include <linux/errno.h>
36#include <linux/interrupt.h>
37#include <linux/if_ether.h>
38#include <linux/inet.h>
39#include <linux/netdevice.h>
40#include <linux/etherdevice.h>
41#include <linux/proc_fs.h>
42#include <linux/seq_file.h>
43#include <linux/init.h>
44#include <net/net_namespace.h>
45#include <net/ip.h>
46#include <net/route.h>
47#include <linux/skbuff.h>
48#include <net/sock.h>
49#include <net/arp.h>
50
51
52/*
53 * Device multicast list maintenance.
54 *
55 * This is used both by IP and by the user level maintenance functions.
56 * Unlike BSD we maintain a usage count on a given multicast address so
57 * that a casual user application can add/delete multicasts used by
58 * protocols without doing damage to the protocols when it deletes the
59 * entries. It also helps IP as it tracks overlapping maps.
60 *
61 * Device mc lists are changed by bh at least if IPv6 is enabled,
62 * so that it must be bh protected.
63 *
64 * We block accesses to device mc filters with netif_tx_lock.
65 */
66
67/*
68 * Delete a device level multicast
69 */
70
71int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
72{
73 int err;
74
75 netif_addr_lock_bh(dev);
76 err = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
77 addr, alen, glbl);
78 if (!err) {
79 /*
80 * We have altered the list, so the card
81 * loaded filter is now wrong. Fix it
82 */
83
84 __dev_set_rx_mode(dev);
85 }
86 netif_addr_unlock_bh(dev);
87 return err;
88}
89
90/*
91 * Add a device level multicast
92 */
93
94int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
95{
96 int err;
97
98 netif_addr_lock_bh(dev);
99 if (alen != dev->addr_len)
100 err = -EINVAL;
101 else
102 err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
103 if (!err)
104 __dev_set_rx_mode(dev);
105 netif_addr_unlock_bh(dev);
106 return err;
107}
108
109/**
110 * dev_mc_sync - Synchronize device's multicast list to another device
111 * @to: destination device
112 * @from: source device
113 *
114 * Add newly added addresses to the destination device and release
115 * addresses that have no users left. The source device must be
116 * locked by netif_tx_lock_bh.
117 *
118 * This function is intended to be called from the dev->set_multicast_list
119 * or dev->set_rx_mode function of layered software devices.
120 */
121int dev_mc_sync(struct net_device *to, struct net_device *from)
122{
123 int err = 0;
124
125 netif_addr_lock_bh(to);
126 err = __dev_addr_sync(&to->mc_list, &to->mc_count,
127 &from->mc_list, &from->mc_count);
128 if (!err)
129 __dev_set_rx_mode(to);
130 netif_addr_unlock_bh(to);
131
132 return err;
133}
134EXPORT_SYMBOL(dev_mc_sync);
135
136
137/**
138 * dev_mc_unsync - Remove synchronized addresses from the destination
139 * device
140 * @to: destination device
141 * @from: source device
142 *
143 * Remove all addresses that were added to the destination device by
144 * dev_mc_sync(). This function is intended to be called from the
145 * dev->stop function of layered software devices.
146 */
147void dev_mc_unsync(struct net_device *to, struct net_device *from)
148{
149 netif_addr_lock_bh(from);
150 netif_addr_lock(to);
151
152 __dev_addr_unsync(&to->mc_list, &to->mc_count,
153 &from->mc_list, &from->mc_count);
154 __dev_set_rx_mode(to);
155
156 netif_addr_unlock(to);
157 netif_addr_unlock_bh(from);
158}
159EXPORT_SYMBOL(dev_mc_unsync);
160
161#ifdef CONFIG_PROC_FS
162static int dev_mc_seq_show(struct seq_file *seq, void *v)
163{
164 struct dev_addr_list *m;
165 struct net_device *dev = v;
166
167 if (v == SEQ_START_TOKEN)
168 return 0;
169
170 netif_addr_lock_bh(dev);
171 for (m = dev->mc_list; m; m = m->next) {
172 int i;
173
174 seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
175 dev->name, m->dmi_users, m->dmi_gusers);
176
177 for (i = 0; i < m->dmi_addrlen; i++)
178 seq_printf(seq, "%02x", m->dmi_addr[i]);
179
180 seq_putc(seq, '\n');
181 }
182 netif_addr_unlock_bh(dev);
183 return 0;
184}
185
186static const struct seq_operations dev_mc_seq_ops = {
187 .start = dev_seq_start,
188 .next = dev_seq_next,
189 .stop = dev_seq_stop,
190 .show = dev_mc_seq_show,
191};
192
193static int dev_mc_seq_open(struct inode *inode, struct file *file)
194{
195 return seq_open_net(inode, file, &dev_mc_seq_ops,
196 sizeof(struct seq_net_private));
197}
198
199static const struct file_operations dev_mc_seq_fops = {
200 .owner = THIS_MODULE,
201 .open = dev_mc_seq_open,
202 .read = seq_read,
203 .llseek = seq_lseek,
204 .release = seq_release_net,
205};
206
207#endif
208
209static int __net_init dev_mc_net_init(struct net *net)
210{
211 if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
212 return -ENOMEM;
213 return 0;
214}
215
216static void __net_exit dev_mc_net_exit(struct net *net)
217{
218 proc_net_remove(net, "dev_mcast");
219}
220
221static struct pernet_operations __net_initdata dev_mc_net_ops = {
222 .init = dev_mc_net_init,
223 .exit = dev_mc_net_exit,
224};
225
226void __init dev_mcast_init(void)
227{
228 register_pernet_subsys(&dev_mc_net_ops);
229}
230
231EXPORT_SYMBOL(dev_mc_add);
232EXPORT_SYMBOL(dev_mc_delete);
diff --git a/net/core/dst.c b/net/core/dst.c
index f307bc18f6a0..9920722cc82b 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
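The dst.c hunks below are pure style cleanup: pointer-declaration spacing, long-line splits, and moving each EXPORT_SYMBOL() up to sit directly under the function it exports, the form checkpatch.pl expects. For instance (hypothetical function, just to show the shape):

int example_helper(int x)
{
	return x + 1;
}
EXPORT_SYMBOL(example_helper);	/* export adjoins its definition */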
@@ -44,7 +44,7 @@ static atomic_t dst_total = ATOMIC_INIT(0);
44 44	 */
45 45 static struct {
46 46	spinlock_t		lock;
47 47	struct dst_entry	*list;
48 48	unsigned long		timer_inc;
49 49	unsigned long		timer_expires;
50 50 } dst_garbage = {
@@ -52,7 +52,7 @@ static struct {
52 52	.timer_inc = DST_GC_MAX,
53 53 };
54 54 static void dst_gc_task(struct work_struct *work);
-55 static void ___dst_free(struct dst_entry * dst);
+55 static void ___dst_free(struct dst_entry *dst);
56 56
57 57 static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);
58 58
@@ -136,8 +136,8 @@ loop:
136 136	}
137 137	expires = dst_garbage.timer_expires;
138 138	/*
-139	 * if the next desired timer is more than 4 seconds in the future
-140	 * then round the timer to whole seconds
+139	 * if the next desired timer is more than 4 seconds in the
+140	 * future then round the timer to whole seconds
141 141	 */
142 142	if (expires > 4*HZ)
143 143		expires = round_jiffies_relative(expires);
@@ -152,7 +152,8 @@ loop:
152 152		" expires: %lu elapsed: %lu us\n",
153 153		atomic_read(&dst_total), delayed, work_performed,
154 154		expires,
-155		elapsed.tv_sec * USEC_PER_SEC + elapsed.tv_nsec / NSEC_PER_USEC);
+155		elapsed.tv_sec * USEC_PER_SEC +
+156		elapsed.tv_nsec / NSEC_PER_USEC);
156 157 #endif
157 158 }
158 159
@@ -163,9 +164,9 @@ int dst_discard(struct sk_buff *skb)
163 164 }
164 165 EXPORT_SYMBOL(dst_discard);
165 166
-166 void * dst_alloc(struct dst_ops * ops)
+167 void *dst_alloc(struct dst_ops *ops)
167 168 {
-168	struct dst_entry * dst;
+169	struct dst_entry *dst;
169 170
170 171	if (ops->gc && atomic_read(&ops->entries) > ops->gc_thresh) {
171 172		if (ops->gc(ops))
@@ -185,19 +186,20 @@ void * dst_alloc(struct dst_ops * ops)
185 186	atomic_inc(&ops->entries);
186 187	return dst;
187 188 }
+189 EXPORT_SYMBOL(dst_alloc);
188 190
-189 static void ___dst_free(struct dst_entry * dst)
+191 static void ___dst_free(struct dst_entry *dst)
190 192 {
191 193	/* The first case (dev==NULL) is required, when
192 194	   protocol module is unloaded.
193 195	 */
-194	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
+196	if (dst->dev == NULL || !(dst->dev->flags&IFF_UP))
195 197		dst->input = dst->output = dst_discard;
-196	}
197 198	dst->obsolete = 2;
198 199 }
+200 EXPORT_SYMBOL(__dst_free);
199 201
-200 void __dst_free(struct dst_entry * dst)
+202 void __dst_free(struct dst_entry *dst)
201 203 {
202 204	spin_lock_bh(&dst_garbage.lock);
203 205	___dst_free(dst);
@@ -262,15 +264,16 @@ again:
262 264	}
263 265	return NULL;
264 266 }
+267 EXPORT_SYMBOL(dst_destroy);
265 268
266 269 void dst_release(struct dst_entry *dst)
267 270 {
268 271	if (dst) {
269 272		int newrefcnt;
270 273
271 274		smp_mb__before_atomic_dec();
272 275		newrefcnt = atomic_dec_return(&dst->__refcnt);
273 276		WARN_ON(newrefcnt < 0);
274 277	}
275 278 }
276 279 EXPORT_SYMBOL(dst_release);
@@ -283,8 +286,8 @@ EXPORT_SYMBOL(dst_release);
283 286  *
284 287  * Commented and originally written by Alexey.
285 288  */
-286 static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
-287			      int unregister)
+289 static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+290		       int unregister)
288 291 {
289 292	if (dst->ops->ifdown)
290 293		dst->ops->ifdown(dst, dev, unregister);
@@ -306,7 +309,8 @@ static inline void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
306 309	}
307 310 }
308 311
-309 static int dst_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
+312 static int dst_dev_event(struct notifier_block *this, unsigned long event,
+313			 void *ptr)
310 314 {
311 315	struct net_device *dev = ptr;
312 316	struct dst_entry *dst, *last = NULL;
@@ -329,9 +333,8 @@ static int dst_dev_event(struct notifier_block *this, unsigned long event, void
329 333			last->next = dst;
330 334		else
331 335			dst_busy_list = dst;
-332		for (; dst; dst = dst->next) {
+336		for (; dst; dst = dst->next)
333 337			dst_ifdown(dst, dev, event != NETDEV_DOWN);
-334		}
335 338		mutex_unlock(&dst_gc_mutex);
336 339		break;
337 340	}
@@ -346,7 +349,3 @@ void __init dst_init(void)
346 349 {
347 350	register_netdevice_notifier(&dst_dev_notifier);
348 351 }
-349
-350 EXPORT_SYMBOL(__dst_free);
-351 EXPORT_SYMBOL(dst_alloc);
-352 EXPORT_SYMBOL(dst_destroy);
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 9d55c57f318a..1a7db92037fa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
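Besides the same EXPORT_SYMBOL shuffling, the ethtool.c hunks below add ETH_FLAG_RXHASH, mapped one-to-one onto NETIF_F_RXHASH so ethtool can toggle use of the device's receive hash (as consumed by RPS). A driver with toggleable hardware RX hashing could simply reuse the generic helpers (a sketch; the ops struct name is made up):

static const struct ethtool_ops example_ethtool_ops = {
	.get_flags = ethtool_op_get_flags,
	.set_flags = ethtool_op_set_flags,
};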
@@ -18,8 +18,8 @@
18 18 #include <linux/ethtool.h>
19 19 #include <linux/netdevice.h>
20 20 #include <linux/bitops.h>
+21 #include <linux/uaccess.h>
21 22 #include <linux/slab.h>
-22 #include <asm/uaccess.h>
23 23
24 24 /*
25 25  * Some useful ethtool_ops methods that're device independent.
@@ -31,6 +31,7 @@ u32 ethtool_op_get_link(struct net_device *dev)
31 31 {
32 32	return netif_carrier_ok(dev) ? 1 : 0;
33 33 }
+34 EXPORT_SYMBOL(ethtool_op_get_link);
34 35
35 36 u32 ethtool_op_get_rx_csum(struct net_device *dev)
36 37 {
@@ -63,6 +64,7 @@ int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
63 64
64 65	return 0;
65 66 }
+67 EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
66 68
67 69 int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
68 70 {
@@ -73,11 +75,13 @@ int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
73 75
74 76	return 0;
75 77 }
+78 EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
76 79
77 80 u32 ethtool_op_get_sg(struct net_device *dev)
78 81 {
79 82	return (dev->features & NETIF_F_SG) != 0;
80 83 }
+84 EXPORT_SYMBOL(ethtool_op_get_sg);
81 85
82 86 int ethtool_op_set_sg(struct net_device *dev, u32 data)
83 87 {
@@ -88,11 +92,13 @@ int ethtool_op_set_sg(struct net_device *dev, u32 data)
88 92
89 93	return 0;
90 94 }
+95 EXPORT_SYMBOL(ethtool_op_set_sg);
91 96
92 97 u32 ethtool_op_get_tso(struct net_device *dev)
93 98 {
94 99	return (dev->features & NETIF_F_TSO) != 0;
95 100 }
+101 EXPORT_SYMBOL(ethtool_op_get_tso);
96 102
97 103 int ethtool_op_set_tso(struct net_device *dev, u32 data)
98 104 {
@@ -103,11 +109,13 @@ int ethtool_op_set_tso(struct net_device *dev, u32 data)
103 109
104 110	return 0;
105 111 }
+112 EXPORT_SYMBOL(ethtool_op_set_tso);
106 113
107 114 u32 ethtool_op_get_ufo(struct net_device *dev)
108 115 {
109 116	return (dev->features & NETIF_F_UFO) != 0;
110 117 }
+118 EXPORT_SYMBOL(ethtool_op_get_ufo);
111 119
112 120 int ethtool_op_set_ufo(struct net_device *dev, u32 data)
113 121 {
@@ -117,12 +125,13 @@ int ethtool_op_set_ufo(struct net_device *dev, u32 data)
117 125		dev->features &= ~NETIF_F_UFO;
118 126	return 0;
119 127 }
+128 EXPORT_SYMBOL(ethtool_op_set_ufo);
120 129
121 130 /* the following list of flags are the same as their associated
122 131  * NETIF_F_xxx values in include/linux/netdevice.h
123 132  */
124 133 static const u32 flags_dup_features =
-125	(ETH_FLAG_LRO | ETH_FLAG_NTUPLE);
+134	(ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH);
126 135
127 136 u32 ethtool_op_get_flags(struct net_device *dev)
128 137 {
@@ -133,6 +142,7 @@ u32 ethtool_op_get_flags(struct net_device *dev)
133 142
134 143	return dev->features & flags_dup_features;
135 144 }
+145 EXPORT_SYMBOL(ethtool_op_get_flags);
136 146
137 147 int ethtool_op_set_flags(struct net_device *dev, u32 data)
138 148 {
@@ -153,9 +163,15 @@ int ethtool_op_set_flags(struct net_device *dev, u32 data)
153 163		features &= ~NETIF_F_NTUPLE;
154 164	}
155 165
+166	if (data & ETH_FLAG_RXHASH)
+167		features |= NETIF_F_RXHASH;
+168	else
+169		features &= ~NETIF_F_RXHASH;
+170
156 171	dev->features = features;
157 172	return 0;
158 173 }
+174 EXPORT_SYMBOL(ethtool_op_set_flags);
159 175
160 176 void ethtool_ntuple_flush(struct net_device *dev)
161 177 {
@@ -201,7 +217,8 @@ static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
201 217	return dev->ethtool_ops->set_settings(dev, &cmd);
202 218 }
203 219
-204 static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+220 static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev,
+221						  void __user *useraddr)
205 222 {
206 223	struct ethtool_drvinfo info;
207 224	const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -241,7 +258,7 @@ static noinline_for_stack int ethtool_get_drvinfo(struct net_device *dev, void _
241 258 }
242 259
243 260 static noinline_for_stack int ethtool_get_sset_info(struct net_device *dev,
-244						    void __user *useraddr)
+261						     void __user *useraddr)
245 262 {
246 263	struct ethtool_sset_info info;
247 264	const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -300,7 +317,8 @@ out:
300 317	return ret;
301 318 }
302 319
-303 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __user *useraddr)
+320 static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
+321						void __user *useraddr)
304 322 {
305 323	struct ethtool_rxnfc cmd;
306 324
@@ -313,7 +331,8 @@ static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, void __u
313 331	return dev->ethtool_ops->set_rxnfc(dev, &cmd);
314 332 }
315 333
-316 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, void __user *useraddr)
+334 static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
+335						void __user *useraddr)
317 336 {
318 337	struct ethtool_rxnfc info;
319 338	const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -358,8 +377,8 @@ err_out:
358 377 }
359 378
360 379 static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
-361				   struct ethtool_rx_ntuple_flow_spec *spec,
-362				   struct ethtool_rx_ntuple_flow_spec_container *fsc)
+380		struct ethtool_rx_ntuple_flow_spec *spec,
+381		struct ethtool_rx_ntuple_flow_spec_container *fsc)
363 382 {
364 383
365 384	/* don't add filters forever */
@@ -385,7 +404,8 @@ static void __rx_ntuple_filter_add(struct ethtool_rx_ntuple_list *list,
385 404	list->count++;
386 405 }
387 406
-388 static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev, void __user *useraddr)
+407 static noinline_for_stack int ethtool_set_rx_ntuple(struct net_device *dev,
+408						    void __user *useraddr)
389 409 {
390 410	struct ethtool_rx_ntuple cmd;
391 411	const struct ethtool_ops *ops = dev->ethtool_ops;
@@ -510,125 +530,125 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
510 530	case UDP_V4_FLOW:
511 531	case SCTP_V4_FLOW:
512 532		sprintf(p, "\tSrc IP addr: 0x%x\n",
513 533			fsc->fs.h_u.tcp_ip4_spec.ip4src);
514 534		p += ETH_GSTRING_LEN;
515 535		num_strings++;
516 536		sprintf(p, "\tSrc IP mask: 0x%x\n",
517 537			fsc->fs.m_u.tcp_ip4_spec.ip4src);
518 538		p += ETH_GSTRING_LEN;
519 539		num_strings++;
520 540		sprintf(p, "\tDest IP addr: 0x%x\n",
521 541			fsc->fs.h_u.tcp_ip4_spec.ip4dst);
522 542		p += ETH_GSTRING_LEN;
523 543		num_strings++;
524 544		sprintf(p, "\tDest IP mask: 0x%x\n",
525 545			fsc->fs.m_u.tcp_ip4_spec.ip4dst);
526 546		p += ETH_GSTRING_LEN;
527 547		num_strings++;
528 548		sprintf(p, "\tSrc Port: %d, mask: 0x%x\n",
529 549			fsc->fs.h_u.tcp_ip4_spec.psrc,
530 550			fsc->fs.m_u.tcp_ip4_spec.psrc);
531 551		p += ETH_GSTRING_LEN;
532 552		num_strings++;
533 553		sprintf(p, "\tDest Port: %d, mask: 0x%x\n",
534 554			fsc->fs.h_u.tcp_ip4_spec.pdst,
535 555			fsc->fs.m_u.tcp_ip4_spec.pdst);
536 556		p += ETH_GSTRING_LEN;
537 557		num_strings++;
538 558		sprintf(p, "\tTOS: %d, mask: 0x%x\n",
539 559			fsc->fs.h_u.tcp_ip4_spec.tos,
540 560			fsc->fs.m_u.tcp_ip4_spec.tos);
541 561		p += ETH_GSTRING_LEN;
542 562		num_strings++;
543 563		break;
544 564	case AH_ESP_V4_FLOW:
545 565	case ESP_V4_FLOW:
546 566		sprintf(p, "\tSrc IP addr: 0x%x\n",
547 567			fsc->fs.h_u.ah_ip4_spec.ip4src);
548 568		p += ETH_GSTRING_LEN;
549 569		num_strings++;
550 570		sprintf(p, "\tSrc IP mask: 0x%x\n",
551 571			fsc->fs.m_u.ah_ip4_spec.ip4src);
552 572		p += ETH_GSTRING_LEN;
553 573		num_strings++;
554 574		sprintf(p, "\tDest IP addr: 0x%x\n",
555 575			fsc->fs.h_u.ah_ip4_spec.ip4dst);
556 576		p += ETH_GSTRING_LEN;
557 577		num_strings++;
558 578		sprintf(p, "\tDest IP mask: 0x%x\n",
559 579			fsc->fs.m_u.ah_ip4_spec.ip4dst);
560 580		p += ETH_GSTRING_LEN;
561 581		num_strings++;
562 582		sprintf(p, "\tSPI: %d, mask: 0x%x\n",
563 583			fsc->fs.h_u.ah_ip4_spec.spi,
564 584			fsc->fs.m_u.ah_ip4_spec.spi);
565 585		p += ETH_GSTRING_LEN;
566 586		num_strings++;
567 587		sprintf(p, "\tTOS: %d, mask: 0x%x\n",
568 588			fsc->fs.h_u.ah_ip4_spec.tos,
569 589			fsc->fs.m_u.ah_ip4_spec.tos);
570 590		p += ETH_GSTRING_LEN;
571 591		num_strings++;
572 592		break;
573 593	case IP_USER_FLOW:
574 594		sprintf(p, "\tSrc IP addr: 0x%x\n",
575 595			fsc->fs.h_u.raw_ip4_spec.ip4src);
576 596		p += ETH_GSTRING_LEN;
577 597		num_strings++;
578 598		sprintf(p, "\tSrc IP mask: 0x%x\n",
579 599			fsc->fs.m_u.raw_ip4_spec.ip4src);
580 600		p += ETH_GSTRING_LEN;
581 601		num_strings++;
582 602		sprintf(p, "\tDest IP addr: 0x%x\n",
583 603			fsc->fs.h_u.raw_ip4_spec.ip4dst);
584 604		p += ETH_GSTRING_LEN;
585 605		num_strings++;
586 606		sprintf(p, "\tDest IP mask: 0x%x\n",
587 607			fsc->fs.m_u.raw_ip4_spec.ip4dst);
588 608		p += ETH_GSTRING_LEN;
589 609		num_strings++;
590 610		break;
591 611	case IPV4_FLOW:
592 612		sprintf(p, "\tSrc IP addr: 0x%x\n",
593 613			fsc->fs.h_u.usr_ip4_spec.ip4src);
594 614		p += ETH_GSTRING_LEN;
595 615		num_strings++;
596 616		sprintf(p, "\tSrc IP mask: 0x%x\n",
597 617			fsc->fs.m_u.usr_ip4_spec.ip4src);
598 618		p += ETH_GSTRING_LEN;
599 619		num_strings++;
600 620		sprintf(p, "\tDest IP addr: 0x%x\n",
601 621			fsc->fs.h_u.usr_ip4_spec.ip4dst);
602 622		p += ETH_GSTRING_LEN;
603 623		num_strings++;
604 624		sprintf(p, "\tDest IP mask: 0x%x\n",
605 625			fsc->fs.m_u.usr_ip4_spec.ip4dst);
606 626		p += ETH_GSTRING_LEN;
607 num_strings++; 627 num_strings++;
608 sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n", 628 sprintf(p, "\tL4 bytes: 0x%x, mask: 0x%x\n",
609 fsc->fs.h_u.usr_ip4_spec.l4_4_bytes, 629 fsc->fs.h_u.usr_ip4_spec.l4_4_bytes,
610 fsc->fs.m_u.usr_ip4_spec.l4_4_bytes); 630 fsc->fs.m_u.usr_ip4_spec.l4_4_bytes);
611 p += ETH_GSTRING_LEN; 631 p += ETH_GSTRING_LEN;
612 num_strings++; 632 num_strings++;
613 sprintf(p, "\tTOS: %d, mask: 0x%x\n", 633 sprintf(p, "\tTOS: %d, mask: 0x%x\n",
614 fsc->fs.h_u.usr_ip4_spec.tos, 634 fsc->fs.h_u.usr_ip4_spec.tos,
615 fsc->fs.m_u.usr_ip4_spec.tos); 635 fsc->fs.m_u.usr_ip4_spec.tos);
616 p += ETH_GSTRING_LEN; 636 p += ETH_GSTRING_LEN;
617 num_strings++; 637 num_strings++;
618 sprintf(p, "\tIP Version: %d, mask: 0x%x\n", 638 sprintf(p, "\tIP Version: %d, mask: 0x%x\n",
619 fsc->fs.h_u.usr_ip4_spec.ip_ver, 639 fsc->fs.h_u.usr_ip4_spec.ip_ver,
620 fsc->fs.m_u.usr_ip4_spec.ip_ver); 640 fsc->fs.m_u.usr_ip4_spec.ip_ver);
621 p += ETH_GSTRING_LEN; 641 p += ETH_GSTRING_LEN;
622 num_strings++; 642 num_strings++;
623 sprintf(p, "\tProtocol: %d, mask: 0x%x\n", 643 sprintf(p, "\tProtocol: %d, mask: 0x%x\n",
624 fsc->fs.h_u.usr_ip4_spec.proto, 644 fsc->fs.h_u.usr_ip4_spec.proto,
625 fsc->fs.m_u.usr_ip4_spec.proto); 645 fsc->fs.m_u.usr_ip4_spec.proto);
626 p += ETH_GSTRING_LEN; 646 p += ETH_GSTRING_LEN;
627 num_strings++; 647 num_strings++;
628 break; 648 break;
629 }; 649 };
630 sprintf(p, "\tVLAN: %d, mask: 0x%x\n", 650 sprintf(p, "\tVLAN: %d, mask: 0x%x\n",
631 fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask); 651 fsc->fs.vlan_tag, fsc->fs.vlan_tag_mask);
632 p += ETH_GSTRING_LEN; 652 p += ETH_GSTRING_LEN;
633 num_strings++; 653 num_strings++;
634 sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data); 654 sprintf(p, "\tUser-defined: 0x%Lx\n", fsc->fs.data);
@@ -641,7 +661,7 @@ static int ethtool_get_rx_ntuple(struct net_device *dev, void __user *useraddr)
641 sprintf(p, "\tAction: Drop\n"); 661 sprintf(p, "\tAction: Drop\n");
642 else 662 else
643 sprintf(p, "\tAction: Direct to queue %d\n", 663 sprintf(p, "\tAction: Direct to queue %d\n",
644 fsc->fs.action); 664 fsc->fs.action);
645 p += ETH_GSTRING_LEN; 665 p += ETH_GSTRING_LEN;
646 num_strings++; 666 num_strings++;
647unknown_filter: 667unknown_filter:
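
In the ethtool_get_rx_ntuple() hunks above, p advances by ETH_GSTRING_LEN after every sprintf(), so each printed line occupies one fixed-width slot of the string buffer no matter how many bytes were actually written. A minimal user-space sketch of that fixed-slot packing, using 32 (the value of ETH_GSTRING_LEN) as the slot size and illustrative addresses only:

#include <stdio.h>
#include <string.h>

#define SLOT_LEN 32	/* stands in for ETH_GSTRING_LEN */

int main(void)
{
	char buf[4 * SLOT_LEN];
	char *p = buf;
	int num_strings = 0;

	memset(buf, 0, sizeof(buf));
	sprintf(p, "\tSrc IP addr: 0x%x\n", 0xc0a80001);
	p += SLOT_LEN;			/* next slot, not strlen() */
	num_strings++;
	sprintf(p, "\tDest IP addr: 0x%x\n", 0xc0a80002);
	p += SLOT_LEN;
	num_strings++;

	printf("%d slots used, second string at offset %d\n",
	       num_strings, SLOT_LEN);
	return 0;
}

The fixed stride is what lets the consumer index string N at offset N * ETH_GSTRING_LEN without a separate length table.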
@@ -853,7 +873,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
853 return ret; 873 return ret;
854} 874}
855 875
856static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr) 876static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev,
877 void __user *useraddr)
857{ 878{
858 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE }; 879 struct ethtool_coalesce coalesce = { .cmd = ETHTOOL_GCOALESCE };
859 880
@@ -867,7 +888,8 @@ static noinline_for_stack int ethtool_get_coalesce(struct net_device *dev, void
867 return 0; 888 return 0;
868} 889}
869 890
870static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr) 891static noinline_for_stack int ethtool_set_coalesce(struct net_device *dev,
892 void __user *useraddr)
871{ 893{
872 struct ethtool_coalesce coalesce; 894 struct ethtool_coalesce coalesce;
873 895
@@ -971,6 +993,7 @@ static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
971 993
972 return dev->ethtool_ops->set_tx_csum(dev, edata.data); 994 return dev->ethtool_ops->set_tx_csum(dev, edata.data);
973} 995}
996EXPORT_SYMBOL(ethtool_op_set_tx_csum);
974 997
975static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr) 998static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
976{ 999{
@@ -1042,7 +1065,7 @@ static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
1042 1065
1043 edata.data = dev->features & NETIF_F_GSO; 1066 edata.data = dev->features & NETIF_F_GSO;
1044 if (copy_to_user(useraddr, &edata, sizeof(edata))) 1067 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1045 return -EFAULT; 1068 return -EFAULT;
1046 return 0; 1069 return 0;
1047} 1070}
1048 1071
@@ -1065,7 +1088,7 @@ static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
1065 1088
1066 edata.data = dev->features & NETIF_F_GRO; 1089 edata.data = dev->features & NETIF_F_GRO;
1067 if (copy_to_user(useraddr, &edata, sizeof(edata))) 1090 if (copy_to_user(useraddr, &edata, sizeof(edata)))
1068 return -EFAULT; 1091 return -EFAULT;
1069 return 0; 1092 return 0;
1070} 1093}
1071 1094
@@ -1277,7 +1300,8 @@ static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
1277 return actor(dev, edata.data); 1300 return actor(dev, edata.data);
1278} 1301}
1279 1302
1280static noinline_for_stack int ethtool_flash_device(struct net_device *dev, char __user *useraddr) 1303static noinline_for_stack int ethtool_flash_device(struct net_device *dev,
1304 char __user *useraddr)
1281{ 1305{
1282 struct ethtool_flash efl; 1306 struct ethtool_flash efl;
1283 1307
@@ -1306,11 +1330,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1306 if (!dev->ethtool_ops) 1330 if (!dev->ethtool_ops)
1307 return -EOPNOTSUPP; 1331 return -EOPNOTSUPP;
1308 1332
1309 if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd))) 1333 if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
1310 return -EFAULT; 1334 return -EFAULT;
1311 1335
1312 /* Allow some commands to be done by anyone */ 1336 /* Allow some commands to be done by anyone */
1313 switch(ethcmd) { 1337 switch (ethcmd) {
1314 case ETHTOOL_GDRVINFO: 1338 case ETHTOOL_GDRVINFO:
1315 case ETHTOOL_GMSGLVL: 1339 case ETHTOOL_GMSGLVL:
1316 case ETHTOOL_GCOALESCE: 1340 case ETHTOOL_GCOALESCE:
@@ -1338,10 +1362,11 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1338 return -EPERM; 1362 return -EPERM;
1339 } 1363 }
1340 1364
1341 if (dev->ethtool_ops->begin) 1365 if (dev->ethtool_ops->begin) {
1342 if ((rc = dev->ethtool_ops->begin(dev)) < 0) 1366 rc = dev->ethtool_ops->begin(dev);
1367 if (rc < 0)
1343 return rc; 1368 return rc;
1344 1369 }
1345 old_features = dev->features; 1370 old_features = dev->features;
1346 1371
1347 switch (ethcmd) { 1372 switch (ethcmd) {
@@ -1531,16 +1556,3 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1531 1556
1532 return rc; 1557 return rc;
1533} 1558}
1534
1535EXPORT_SYMBOL(ethtool_op_get_link);
1536EXPORT_SYMBOL(ethtool_op_get_sg);
1537EXPORT_SYMBOL(ethtool_op_get_tso);
1538EXPORT_SYMBOL(ethtool_op_set_sg);
1539EXPORT_SYMBOL(ethtool_op_set_tso);
1540EXPORT_SYMBOL(ethtool_op_set_tx_csum);
1541EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
1542EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
1543EXPORT_SYMBOL(ethtool_op_set_ufo);
1544EXPORT_SYMBOL(ethtool_op_get_ufo);
1545EXPORT_SYMBOL(ethtool_op_set_flags);
1546EXPORT_SYMBOL(ethtool_op_get_flags);
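
The handlers above all follow the same SIOCETHTOOL contract: user space fills a command structure whose first field is the ETHTOOL_* command, points ifr_data at it, and the kernel copies the results back in place. A minimal user-space sketch for ETHTOOL_GCOALESCE, one of the commands the switch above allows for unprivileged callers; "eth0" is an assumed interface name and error handling is kept minimal:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_coalesce ecoal = { .cmd = ETHTOOL_GCOALESCE };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecoal;	/* kernel copies the result back here */

	if (fd < 0 || ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		return 1;
	}
	printf("rx-usecs: %u, rx-frames: %u\n",
	       ecoal.rx_coalesce_usecs, ecoal.rx_max_coalesced_frames);
	close(fd);
	return 0;
}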
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index d2c3e7dc2e5f..1bc66592453c 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -39,6 +39,24 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
39} 39}
40EXPORT_SYMBOL(fib_default_rule_add); 40EXPORT_SYMBOL(fib_default_rule_add);
41 41
42u32 fib_default_rule_pref(struct fib_rules_ops *ops)
43{
44 struct list_head *pos;
45 struct fib_rule *rule;
46
47 if (!list_empty(&ops->rules_list)) {
48 pos = ops->rules_list.next;
49 if (pos->next != &ops->rules_list) {
50 rule = list_entry(pos->next, struct fib_rule, list);
51 if (rule->pref)
52 return rule->pref - 1;
53 }
54 }
55
56 return 0;
57}
58EXPORT_SYMBOL(fib_default_rule_pref);
59
42static void notify_rule_change(int event, struct fib_rule *rule, 60static void notify_rule_change(int event, struct fib_rule *rule,
43 struct fib_rules_ops *ops, struct nlmsghdr *nlh, 61 struct fib_rules_ops *ops, struct nlmsghdr *nlh,
44 u32 pid); 62 u32 pid);
@@ -109,7 +127,7 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net)
109 struct fib_rules_ops *ops; 127 struct fib_rules_ops *ops;
110 int err; 128 int err;
111 129
112 ops = kmemdup(tmpl, sizeof (*ops), GFP_KERNEL); 130 ops = kmemdup(tmpl, sizeof(*ops), GFP_KERNEL);
113 if (ops == NULL) 131 if (ops == NULL)
114 return ERR_PTR(-ENOMEM); 132 return ERR_PTR(-ENOMEM);
115 133
@@ -124,7 +142,6 @@ fib_rules_register(struct fib_rules_ops *tmpl, struct net *net)
124 142
125 return ops; 143 return ops;
126} 144}
127
128EXPORT_SYMBOL_GPL(fib_rules_register); 145EXPORT_SYMBOL_GPL(fib_rules_register);
129 146
130void fib_rules_cleanup_ops(struct fib_rules_ops *ops) 147void fib_rules_cleanup_ops(struct fib_rules_ops *ops)
@@ -158,7 +175,6 @@ void fib_rules_unregister(struct fib_rules_ops *ops)
158 175
159 call_rcu(&ops->rcu, fib_rules_put_rcu); 176 call_rcu(&ops->rcu, fib_rules_put_rcu);
160} 177}
161
162EXPORT_SYMBOL_GPL(fib_rules_unregister); 178EXPORT_SYMBOL_GPL(fib_rules_unregister);
163 179
164static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops, 180static int fib_rule_match(struct fib_rule *rule, struct fib_rules_ops *ops,
@@ -221,7 +237,6 @@ out:
221 237
222 return err; 238 return err;
223} 239}
224
225EXPORT_SYMBOL_GPL(fib_rules_lookup); 240EXPORT_SYMBOL_GPL(fib_rules_lookup);
226 241
227static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb, 242static int validate_rulemsg(struct fib_rule_hdr *frh, struct nlattr **tb,
@@ -520,6 +535,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
520 return -EMSGSIZE; 535 return -EMSGSIZE;
521 536
522 frh = nlmsg_data(nlh); 537 frh = nlmsg_data(nlh);
538 frh->family = ops->family;
523 frh->table = rule->table; 539 frh->table = rule->table;
524 NLA_PUT_U32(skb, FRA_TABLE, rule->table); 540 NLA_PUT_U32(skb, FRA_TABLE, rule->table);
525 frh->res1 = 0; 541 frh->res1 = 0;
@@ -614,7 +630,7 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
614 break; 630 break;
615 631
616 cb->args[1] = 0; 632 cb->args[1] = 0;
617 skip: 633skip:
618 idx++; 634 idx++;
619 } 635 }
620 rcu_read_unlock(); 636 rcu_read_unlock();
@@ -686,7 +702,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
686 struct fib_rules_ops *ops; 702 struct fib_rules_ops *ops;
687 703
688 ASSERT_RTNL(); 704 ASSERT_RTNL();
689 rcu_read_lock();
690 705
691 switch (event) { 706 switch (event) {
692 case NETDEV_REGISTER: 707 case NETDEV_REGISTER:
@@ -700,8 +715,6 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
700 break; 715 break;
701 } 716 }
702 717
703 rcu_read_unlock();
704
705 return NOTIFY_DONE; 718 return NOTIFY_DONE;
706} 719}
707 720
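
The new fib_default_rule_pref() helper picks a preference for rules added without an explicit one: it slots the new rule just above the second entry in the preference-sorted list, leaving the first rule (typically the pref-0 local-table rule) in place. A hedged user-space sketch of the same selection over a plain array of preferences; the values are the conventional IPv4 local/main/default ones:

#include <stdio.h>

static unsigned int default_rule_pref(const unsigned int *pref, int n)
{
	if (n >= 2 && pref[1])		/* a second rule exists, pref != 0 */
		return pref[1] - 1;	/* slot just above it */
	return 0;
}

int main(void)
{
	unsigned int ipv4_prefs[] = { 0, 32766, 32767 };  /* local, main, default */

	printf("new rule pref: %u\n", default_rule_pref(ipv4_prefs, 3)); /* 32765 */
	return 0;
}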
diff --git a/net/core/flow.c b/net/core/flow.c
index 96015871ecea..161900674009 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -26,113 +26,158 @@
26#include <linux/security.h> 26#include <linux/security.h>
27 27
28struct flow_cache_entry { 28struct flow_cache_entry {
29 struct flow_cache_entry *next; 29 union {
30 u16 family; 30 struct hlist_node hlist;
31 u8 dir; 31 struct list_head gc_list;
32 u32 genid; 32 } u;
33 struct flowi key; 33 u16 family;
34 void *object; 34 u8 dir;
35 atomic_t *object_ref; 35 u32 genid;
36 struct flowi key;
37 struct flow_cache_object *object;
36}; 38};
37 39
38atomic_t flow_cache_genid = ATOMIC_INIT(0); 40struct flow_cache_percpu {
39 41 struct hlist_head *hash_table;
40static u32 flow_hash_shift; 42 int hash_count;
41#define flow_hash_size (1 << flow_hash_shift) 43 u32 hash_rnd;
42static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; 44 int hash_rnd_recalc;
43 45 struct tasklet_struct flush_tasklet;
44#define flow_table(cpu) (per_cpu(flow_tables, cpu)) 46};
45
46static struct kmem_cache *flow_cachep __read_mostly;
47 47
48static int flow_lwm, flow_hwm; 48struct flow_flush_info {
49 struct flow_cache *cache;
50 atomic_t cpuleft;
51 struct completion completion;
52};
49 53
50struct flow_percpu_info { 54struct flow_cache {
51 int hash_rnd_recalc; 55 u32 hash_shift;
52 u32 hash_rnd; 56 unsigned long order;
53 int count; 57 struct flow_cache_percpu *percpu;
58 struct notifier_block hotcpu_notifier;
59 int low_watermark;
60 int high_watermark;
61 struct timer_list rnd_timer;
54}; 62};
55static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
56 63
57#define flow_hash_rnd_recalc(cpu) \ 64atomic_t flow_cache_genid = ATOMIC_INIT(0);
58 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc) 65static struct flow_cache flow_cache_global;
59#define flow_hash_rnd(cpu) \ 66static struct kmem_cache *flow_cachep;
60 (per_cpu(flow_hash_info, cpu).hash_rnd)
61#define flow_count(cpu) \
62 (per_cpu(flow_hash_info, cpu).count)
63 67
64static struct timer_list flow_hash_rnd_timer; 68static DEFINE_SPINLOCK(flow_cache_gc_lock);
69static LIST_HEAD(flow_cache_gc_list);
65 70
66#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ) 71#define flow_cache_hash_size(cache) (1 << (cache)->hash_shift)
67 72#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
68struct flow_flush_info {
69 atomic_t cpuleft;
70 struct completion completion;
71};
72static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
73
74#define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
75 73
76static void flow_cache_new_hashrnd(unsigned long arg) 74static void flow_cache_new_hashrnd(unsigned long arg)
77{ 75{
76 struct flow_cache *fc = (void *) arg;
78 int i; 77 int i;
79 78
80 for_each_possible_cpu(i) 79 for_each_possible_cpu(i)
81 flow_hash_rnd_recalc(i) = 1; 80 per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;
82 81
83 flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; 82 fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
84 add_timer(&flow_hash_rnd_timer); 83 add_timer(&fc->rnd_timer);
84}
85
86static int flow_entry_valid(struct flow_cache_entry *fle)
87{
88 if (atomic_read(&flow_cache_genid) != fle->genid)
89 return 0;
90 if (fle->object && !fle->object->ops->check(fle->object))
91 return 0;
92 return 1;
85} 93}
86 94
87static void flow_entry_kill(int cpu, struct flow_cache_entry *fle) 95static void flow_entry_kill(struct flow_cache_entry *fle)
88{ 96{
89 if (fle->object) 97 if (fle->object)
90 atomic_dec(fle->object_ref); 98 fle->object->ops->delete(fle->object);
91 kmem_cache_free(flow_cachep, fle); 99 kmem_cache_free(flow_cachep, fle);
92 flow_count(cpu)--;
93} 100}
94 101
95static void __flow_cache_shrink(int cpu, int shrink_to) 102static void flow_cache_gc_task(struct work_struct *work)
96{ 103{
97 struct flow_cache_entry *fle, **flp; 104 struct list_head gc_list;
98 int i; 105 struct flow_cache_entry *fce, *n;
99 106
100 for (i = 0; i < flow_hash_size; i++) { 107 INIT_LIST_HEAD(&gc_list);
101 int k = 0; 108 spin_lock_bh(&flow_cache_gc_lock);
109 list_splice_tail_init(&flow_cache_gc_list, &gc_list);
110 spin_unlock_bh(&flow_cache_gc_lock);
102 111
103 flp = &flow_table(cpu)[i]; 112 list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
104 while ((fle = *flp) != NULL && k < shrink_to) { 113 flow_entry_kill(fce);
105 k++; 114}
106 flp = &fle->next; 115static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
107 } 116
108 while ((fle = *flp) != NULL) { 117static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
109 *flp = fle->next; 118 int deleted, struct list_head *gc_list)
110 flow_entry_kill(cpu, fle); 119{
111 } 120 if (deleted) {
121 fcp->hash_count -= deleted;
122 spin_lock_bh(&flow_cache_gc_lock);
123 list_splice_tail(gc_list, &flow_cache_gc_list);
124 spin_unlock_bh(&flow_cache_gc_lock);
125 schedule_work(&flow_cache_gc_work);
112 } 126 }
113} 127}
114 128
115static void flow_cache_shrink(int cpu) 129static void __flow_cache_shrink(struct flow_cache *fc,
130 struct flow_cache_percpu *fcp,
131 int shrink_to)
116{ 132{
117 int shrink_to = flow_lwm / flow_hash_size; 133 struct flow_cache_entry *fle;
134 struct hlist_node *entry, *tmp;
135 LIST_HEAD(gc_list);
136 int i, deleted = 0;
137
138 for (i = 0; i < flow_cache_hash_size(fc); i++) {
139 int saved = 0;
140
141 hlist_for_each_entry_safe(fle, entry, tmp,
142 &fcp->hash_table[i], u.hlist) {
143 if (saved < shrink_to &&
144 flow_entry_valid(fle)) {
145 saved++;
146 } else {
147 deleted++;
148 hlist_del(&fle->u.hlist);
149 list_add_tail(&fle->u.gc_list, &gc_list);
150 }
151 }
152 }
118 153
119 __flow_cache_shrink(cpu, shrink_to); 154 flow_cache_queue_garbage(fcp, deleted, &gc_list);
120} 155}
121 156
122static void flow_new_hash_rnd(int cpu) 157static void flow_cache_shrink(struct flow_cache *fc,
158 struct flow_cache_percpu *fcp)
123{ 159{
124 get_random_bytes(&flow_hash_rnd(cpu), sizeof(u32)); 160 int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);
125 flow_hash_rnd_recalc(cpu) = 0;
126 161
127 __flow_cache_shrink(cpu, 0); 162 __flow_cache_shrink(fc, fcp, shrink_to);
128} 163}
129 164
130static u32 flow_hash_code(struct flowi *key, int cpu) 165static void flow_new_hash_rnd(struct flow_cache *fc,
166 struct flow_cache_percpu *fcp)
167{
168 get_random_bytes(&fcp->hash_rnd, sizeof(u32));
169 fcp->hash_rnd_recalc = 0;
170 __flow_cache_shrink(fc, fcp, 0);
171}
172
173static u32 flow_hash_code(struct flow_cache *fc,
174 struct flow_cache_percpu *fcp,
175 struct flowi *key)
131{ 176{
132 u32 *k = (u32 *) key; 177 u32 *k = (u32 *) key;
133 178
134 return (jhash2(k, (sizeof(*key) / sizeof(u32)), flow_hash_rnd(cpu)) & 179 return (jhash2(k, (sizeof(*key) / sizeof(u32)), fcp->hash_rnd)
135 (flow_hash_size - 1)); 180 & (flow_cache_hash_size(fc) - 1));
136} 181}
137 182
138#if (BITS_PER_LONG == 64) 183#if (BITS_PER_LONG == 64)
@@ -165,114 +210,117 @@ static int flow_key_compare(struct flowi *key1, struct flowi *key2)
165 return 0; 210 return 0;
166} 211}
167 212
168void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir, 213struct flow_cache_object *
169 flow_resolve_t resolver) 214flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
215 flow_resolve_t resolver, void *ctx)
170{ 216{
171 struct flow_cache_entry *fle, **head; 217 struct flow_cache *fc = &flow_cache_global;
218 struct flow_cache_percpu *fcp;
219 struct flow_cache_entry *fle, *tfle;
220 struct hlist_node *entry;
221 struct flow_cache_object *flo;
172 unsigned int hash; 222 unsigned int hash;
173 int cpu;
174 223
175 local_bh_disable(); 224 local_bh_disable();
176 cpu = smp_processor_id(); 225 fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
177 226
178 fle = NULL; 227 fle = NULL;
228 flo = NULL;
179 /* Packet really early in init? Making flow_cache_init a 229 /* Packet really early in init? Making flow_cache_init a
180 * pre-smp initcall would solve this. --RR */ 230 * pre-smp initcall would solve this. --RR */
181 if (!flow_table(cpu)) 231 if (!fcp->hash_table)
182 goto nocache; 232 goto nocache;
183 233
184 if (flow_hash_rnd_recalc(cpu)) 234 if (fcp->hash_rnd_recalc)
185 flow_new_hash_rnd(cpu); 235 flow_new_hash_rnd(fc, fcp);
186 hash = flow_hash_code(key, cpu);
187 236
188 head = &flow_table(cpu)[hash]; 237 hash = flow_hash_code(fc, fcp, key);
189 for (fle = *head; fle; fle = fle->next) { 238 hlist_for_each_entry(tfle, entry, &fcp->hash_table[hash], u.hlist) {
190 if (fle->family == family && 239 if (tfle->family == family &&
191 fle->dir == dir && 240 tfle->dir == dir &&
192 flow_key_compare(key, &fle->key) == 0) { 241 flow_key_compare(key, &tfle->key) == 0) {
193 if (fle->genid == atomic_read(&flow_cache_genid)) { 242 fle = tfle;
194 void *ret = fle->object;
195
196 if (ret)
197 atomic_inc(fle->object_ref);
198 local_bh_enable();
199
200 return ret;
201 }
202 break; 243 break;
203 } 244 }
204 } 245 }
205 246
206 if (!fle) { 247 if (unlikely(!fle)) {
207 if (flow_count(cpu) > flow_hwm) 248 if (fcp->hash_count > fc->high_watermark)
208 flow_cache_shrink(cpu); 249 flow_cache_shrink(fc, fcp);
209 250
210 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC); 251 fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
211 if (fle) { 252 if (fle) {
212 fle->next = *head;
213 *head = fle;
214 fle->family = family; 253 fle->family = family;
215 fle->dir = dir; 254 fle->dir = dir;
216 memcpy(&fle->key, key, sizeof(*key)); 255 memcpy(&fle->key, key, sizeof(*key));
217 fle->object = NULL; 256 fle->object = NULL;
218 flow_count(cpu)++; 257 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
258 fcp->hash_count++;
219 } 259 }
260 } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
261 flo = fle->object;
262 if (!flo)
263 goto ret_object;
264 flo = flo->ops->get(flo);
265 if (flo)
266 goto ret_object;
267 } else if (fle->object) {
268 flo = fle->object;
269 flo->ops->delete(flo);
270 fle->object = NULL;
220 } 271 }
221 272
222nocache: 273nocache:
223 { 274 flo = NULL;
224 int err; 275 if (fle) {
225 void *obj; 276 flo = fle->object;
226 atomic_t *obj_ref; 277 fle->object = NULL;
227
228 err = resolver(net, key, family, dir, &obj, &obj_ref);
229
230 if (fle && !err) {
231 fle->genid = atomic_read(&flow_cache_genid);
232
233 if (fle->object)
234 atomic_dec(fle->object_ref);
235
236 fle->object = obj;
237 fle->object_ref = obj_ref;
238 if (obj)
239 atomic_inc(fle->object_ref);
240 }
241 local_bh_enable();
242
243 if (err)
244 obj = ERR_PTR(err);
245 return obj;
246 } 278 }
279 flo = resolver(net, key, family, dir, flo, ctx);
280 if (fle) {
281 fle->genid = atomic_read(&flow_cache_genid);
282 if (!IS_ERR(flo))
283 fle->object = flo;
284 else
285 fle->genid--;
286 } else {
287 if (flo && !IS_ERR(flo))
288 flo->ops->delete(flo);
289 }
290ret_object:
291 local_bh_enable();
292 return flo;
247} 293}
248 294
249static void flow_cache_flush_tasklet(unsigned long data) 295static void flow_cache_flush_tasklet(unsigned long data)
250{ 296{
251 struct flow_flush_info *info = (void *)data; 297 struct flow_flush_info *info = (void *)data;
252 int i; 298 struct flow_cache *fc = info->cache;
253 int cpu; 299 struct flow_cache_percpu *fcp;
254 300 struct flow_cache_entry *fle;
255 cpu = smp_processor_id(); 301 struct hlist_node *entry, *tmp;
256 for (i = 0; i < flow_hash_size; i++) { 302 LIST_HEAD(gc_list);
257 struct flow_cache_entry *fle; 303 int i, deleted = 0;
258 304
259 fle = flow_table(cpu)[i]; 305 fcp = per_cpu_ptr(fc->percpu, smp_processor_id());
260 for (; fle; fle = fle->next) { 306 for (i = 0; i < flow_cache_hash_size(fc); i++) {
261 unsigned genid = atomic_read(&flow_cache_genid); 307 hlist_for_each_entry_safe(fle, entry, tmp,
262 308 &fcp->hash_table[i], u.hlist) {
263 if (!fle->object || fle->genid == genid) 309 if (flow_entry_valid(fle))
264 continue; 310 continue;
265 311
266 fle->object = NULL; 312 deleted++;
267 atomic_dec(fle->object_ref); 313 hlist_del(&fle->u.hlist);
314 list_add_tail(&fle->u.gc_list, &gc_list);
268 } 315 }
269 } 316 }
270 317
318 flow_cache_queue_garbage(fcp, deleted, &gc_list);
319
271 if (atomic_dec_and_test(&info->cpuleft)) 320 if (atomic_dec_and_test(&info->cpuleft))
272 complete(&info->completion); 321 complete(&info->completion);
273} 322}
274 323
275static void flow_cache_flush_per_cpu(void *) __attribute__((__unused__));
276static void flow_cache_flush_per_cpu(void *data) 324static void flow_cache_flush_per_cpu(void *data)
277{ 325{
278 struct flow_flush_info *info = data; 326 struct flow_flush_info *info = data;
@@ -280,8 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)
280 struct tasklet_struct *tasklet; 328 struct tasklet_struct *tasklet;
281 329
282 cpu = smp_processor_id(); 330 cpu = smp_processor_id();
283 331 tasklet = &per_cpu_ptr(info->cache->percpu, cpu)->flush_tasklet;
284 tasklet = flow_flush_tasklet(cpu);
285 tasklet->data = (unsigned long)info; 332 tasklet->data = (unsigned long)info;
286 tasklet_schedule(tasklet); 333 tasklet_schedule(tasklet);
287} 334}
@@ -294,6 +341,7 @@ void flow_cache_flush(void)
294 /* Don't want cpus going down or up during this. */ 341 /* Don't want cpus going down or up during this. */
295 get_online_cpus(); 342 get_online_cpus();
296 mutex_lock(&flow_flush_sem); 343 mutex_lock(&flow_flush_sem);
344 info.cache = &flow_cache_global;
297 atomic_set(&info.cpuleft, num_online_cpus()); 345 atomic_set(&info.cpuleft, num_online_cpus());
298 init_completion(&info.completion); 346 init_completion(&info.completion);
299 347
@@ -307,62 +355,75 @@ void flow_cache_flush(void)
307 put_online_cpus(); 355 put_online_cpus();
308} 356}
309 357
310static void __init flow_cache_cpu_prepare(int cpu) 358static void __init flow_cache_cpu_prepare(struct flow_cache *fc,
359 struct flow_cache_percpu *fcp)
311{ 360{
312 struct tasklet_struct *tasklet; 361 fcp->hash_table = (struct hlist_head *)
313 unsigned long order; 362 __get_free_pages(GFP_KERNEL|__GFP_ZERO, fc->order);
314 363 if (!fcp->hash_table)
315 for (order = 0; 364 panic("NET: failed to allocate flow cache order %lu\n", fc->order);
316 (PAGE_SIZE << order) < 365
317 (sizeof(struct flow_cache_entry *)*flow_hash_size); 366 fcp->hash_rnd_recalc = 1;
318 order++) 367 fcp->hash_count = 0;
319 /* NOTHING */; 368 tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
320
321 flow_table(cpu) = (struct flow_cache_entry **)
322 __get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
323 if (!flow_table(cpu))
324 panic("NET: failed to allocate flow cache order %lu\n", order);
325
326 flow_hash_rnd_recalc(cpu) = 1;
327 flow_count(cpu) = 0;
328
329 tasklet = flow_flush_tasklet(cpu);
330 tasklet_init(tasklet, flow_cache_flush_tasklet, 0);
331} 369}
332 370
333static int flow_cache_cpu(struct notifier_block *nfb, 371static int flow_cache_cpu(struct notifier_block *nfb,
334 unsigned long action, 372 unsigned long action,
335 void *hcpu) 373 void *hcpu)
336{ 374{
375 struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
376 int cpu = (unsigned long) hcpu;
377 struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
378
337 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) 379 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
338 __flow_cache_shrink((unsigned long)hcpu, 0); 380 __flow_cache_shrink(fc, fcp, 0);
339 return NOTIFY_OK; 381 return NOTIFY_OK;
340} 382}
341 383
342static int __init flow_cache_init(void) 384static int flow_cache_init(struct flow_cache *fc)
343{ 385{
386 unsigned long order;
344 int i; 387 int i;
345 388
346 flow_cachep = kmem_cache_create("flow_cache", 389 fc->hash_shift = 10;
347 sizeof(struct flow_cache_entry), 390 fc->low_watermark = 2 * flow_cache_hash_size(fc);
348 0, SLAB_PANIC, 391 fc->high_watermark = 4 * flow_cache_hash_size(fc);
349 NULL); 392
350 flow_hash_shift = 10; 393 for (order = 0;
351 flow_lwm = 2 * flow_hash_size; 394 (PAGE_SIZE << order) <
352 flow_hwm = 4 * flow_hash_size; 395 (sizeof(struct hlist_head)*flow_cache_hash_size(fc));
396 order++)
397 /* NOTHING */;
398 fc->order = order;
399 fc->percpu = alloc_percpu(struct flow_cache_percpu);
353 400
354 setup_timer(&flow_hash_rnd_timer, flow_cache_new_hashrnd, 0); 401 setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
355 flow_hash_rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD; 402 (unsigned long) fc);
356 add_timer(&flow_hash_rnd_timer); 403 fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
404 add_timer(&fc->rnd_timer);
357 405
358 for_each_possible_cpu(i) 406 for_each_possible_cpu(i)
359 flow_cache_cpu_prepare(i); 407 flow_cache_cpu_prepare(fc, per_cpu_ptr(fc->percpu, i));
408
409 fc->hotcpu_notifier = (struct notifier_block){
410 .notifier_call = flow_cache_cpu,
411 };
412 register_hotcpu_notifier(&fc->hotcpu_notifier);
360 413
361 hotcpu_notifier(flow_cache_cpu, 0);
362 return 0; 414 return 0;
363} 415}
364 416
365module_init(flow_cache_init); 417static int __init flow_cache_init_global(void)
418{
419 flow_cachep = kmem_cache_create("flow_cache",
420 sizeof(struct flow_cache_entry),
421 0, SLAB_PANIC, NULL);
422
423 return flow_cache_init(&flow_cache_global);
424}
425
426module_init(flow_cache_init_global);
366 427
367EXPORT_SYMBOL(flow_cache_genid); 428EXPORT_SYMBOL(flow_cache_genid);
368EXPORT_SYMBOL(flow_cache_lookup); 429EXPORT_SYMBOL(flow_cache_lookup);
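
The rewritten flow cache never frees entries from the per-CPU lookup path. Stale entries are unlinked from the hash chain, strung onto a private list through the same union'd link field (an entry sits on exactly one of the two lists at a time), and handed to a work item that frees them from process context, where the object delete callbacks can safely run. A stand-alone sketch of that union'd-linkage pattern; all names here are hypothetical and the lists are simplified to singly linked ones:

#include <stdio.h>
#include <stdlib.h>

struct entry {
	union {
		struct entry *hash_next;	/* while live, on the hash chain */
		struct entry *gc_next;		/* once unlinked, on the gc list */
	} u;
	int key;
};

static struct entry *hash_head, *gc_head;

static void shrink_chain(int keep_key)
{
	struct entry **pp = &hash_head, *e;

	while ((e = *pp) != NULL) {
		if (e->key == keep_key) {	/* still valid: keep it */
			pp = &e->u.hash_next;
			continue;
		}
		*pp = e->u.hash_next;		/* unlink from the hash... */
		e->u.gc_next = gc_head;		/* ...reuse the link for gc */
		gc_head = e;
	}
}

static void gc_work(void)	/* deferred work, runs in process context */
{
	struct entry *e, *n;

	for (e = gc_head, gc_head = NULL; e; e = n) {
		n = e->u.gc_next;
		free(e);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		struct entry *e = malloc(sizeof(*e));

		e->key = i;
		e->u.hash_next = hash_head;
		hash_head = e;
	}
	shrink_chain(2);	/* keep key 2, queue the other three for gc */
	gc_work();
	printf("survivor: %d\n", hash_head->key);
	return 0;
}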
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 59cfc7d8fc45..c57c4b228bb5 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -17,6 +17,7 @@
17#include <net/sock.h> 17#include <net/sock.h>
18#include <linux/rtnetlink.h> 18#include <linux/rtnetlink.h>
19#include <linux/wireless.h> 19#include <linux/wireless.h>
20#include <linux/vmalloc.h>
20#include <net/wext.h> 21#include <net/wext.h>
21 22
22#include "net-sysfs.h" 23#include "net-sysfs.h"
@@ -467,6 +468,304 @@ static struct attribute_group wireless_group = {
467}; 468};
468#endif 469#endif
469 470
471#ifdef CONFIG_RPS
472/*
473 * RX queue sysfs structures and functions.
474 */
475struct rx_queue_attribute {
476 struct attribute attr;
477 ssize_t (*show)(struct netdev_rx_queue *queue,
478 struct rx_queue_attribute *attr, char *buf);
479 ssize_t (*store)(struct netdev_rx_queue *queue,
480 struct rx_queue_attribute *attr, const char *buf, size_t len);
481};
482#define to_rx_queue_attr(_attr) container_of(_attr, \
483 struct rx_queue_attribute, attr)
484
485#define to_rx_queue(obj) container_of(obj, struct netdev_rx_queue, kobj)
486
487static ssize_t rx_queue_attr_show(struct kobject *kobj, struct attribute *attr,
488 char *buf)
489{
490 struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
491 struct netdev_rx_queue *queue = to_rx_queue(kobj);
492
493 if (!attribute->show)
494 return -EIO;
495
496 return attribute->show(queue, attribute, buf);
497}
498
499static ssize_t rx_queue_attr_store(struct kobject *kobj, struct attribute *attr,
500 const char *buf, size_t count)
501{
502 struct rx_queue_attribute *attribute = to_rx_queue_attr(attr);
503 struct netdev_rx_queue *queue = to_rx_queue(kobj);
504
505 if (!attribute->store)
506 return -EIO;
507
508 return attribute->store(queue, attribute, buf, count);
509}
510
511static struct sysfs_ops rx_queue_sysfs_ops = {
512 .show = rx_queue_attr_show,
513 .store = rx_queue_attr_store,
514};
515
516static ssize_t show_rps_map(struct netdev_rx_queue *queue,
517 struct rx_queue_attribute *attribute, char *buf)
518{
519 struct rps_map *map;
520 cpumask_var_t mask;
521 size_t len = 0;
522 int i;
523
524 if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
525 return -ENOMEM;
526
527 rcu_read_lock();
528 map = rcu_dereference(queue->rps_map);
529 if (map)
530 for (i = 0; i < map->len; i++)
531 cpumask_set_cpu(map->cpus[i], mask);
532
533 len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
534 if (PAGE_SIZE - len < 3) {
535 rcu_read_unlock();
536 free_cpumask_var(mask);
537 return -EINVAL;
538 }
539 rcu_read_unlock();
540
541 free_cpumask_var(mask);
542 len += sprintf(buf + len, "\n");
543 return len;
544}
545
546static void rps_map_release(struct rcu_head *rcu)
547{
548 struct rps_map *map = container_of(rcu, struct rps_map, rcu);
549
550 kfree(map);
551}
552
553static ssize_t store_rps_map(struct netdev_rx_queue *queue,
554 struct rx_queue_attribute *attribute,
555 const char *buf, size_t len)
556{
557 struct rps_map *old_map, *map;
558 cpumask_var_t mask;
559 int err, cpu, i;
560 static DEFINE_SPINLOCK(rps_map_lock);
561
562 if (!capable(CAP_NET_ADMIN))
563 return -EPERM;
564
565 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
566 return -ENOMEM;
567
568 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
569 if (err) {
570 free_cpumask_var(mask);
571 return err;
572 }
573
574 map = kzalloc(max_t(unsigned,
575 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
576 GFP_KERNEL);
577 if (!map) {
578 free_cpumask_var(mask);
579 return -ENOMEM;
580 }
581
582 i = 0;
583 for_each_cpu_and(cpu, mask, cpu_online_mask)
584 map->cpus[i++] = cpu;
585
586 if (i)
587 map->len = i;
588 else {
589 kfree(map);
590 map = NULL;
591 }
592
593 spin_lock(&rps_map_lock);
594 old_map = queue->rps_map;
595 rcu_assign_pointer(queue->rps_map, map);
596 spin_unlock(&rps_map_lock);
597
598 if (old_map)
599 call_rcu(&old_map->rcu, rps_map_release);
600
601 free_cpumask_var(mask);
602 return len;
603}
604
605static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
606 struct rx_queue_attribute *attr,
607 char *buf)
608{
609 struct rps_dev_flow_table *flow_table;
610 unsigned int val = 0;
611
612 rcu_read_lock();
613 flow_table = rcu_dereference(queue->rps_flow_table);
614 if (flow_table)
615 val = flow_table->mask + 1;
616 rcu_read_unlock();
617
618 return sprintf(buf, "%u\n", val);
619}
620
621static void rps_dev_flow_table_release_work(struct work_struct *work)
622{
623 struct rps_dev_flow_table *table = container_of(work,
624 struct rps_dev_flow_table, free_work);
625
626 vfree(table);
627}
628
629static void rps_dev_flow_table_release(struct rcu_head *rcu)
630{
631 struct rps_dev_flow_table *table = container_of(rcu,
632 struct rps_dev_flow_table, rcu);
633
634 INIT_WORK(&table->free_work, rps_dev_flow_table_release_work);
635 schedule_work(&table->free_work);
636}
637
638static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
639 struct rx_queue_attribute *attr,
640 const char *buf, size_t len)
641{
642 unsigned int count;
643 char *endp;
644 struct rps_dev_flow_table *table, *old_table;
645 static DEFINE_SPINLOCK(rps_dev_flow_lock);
646
647 if (!capable(CAP_NET_ADMIN))
648 return -EPERM;
649
650 count = simple_strtoul(buf, &endp, 0);
651 if (endp == buf)
652 return -EINVAL;
653
654 if (count) {
655 int i;
656
657 if (count > 1<<30) {
658 /* Enforce a limit to prevent overflow */
659 return -EINVAL;
660 }
661 count = roundup_pow_of_two(count);
662 table = vmalloc(RPS_DEV_FLOW_TABLE_SIZE(count));
663 if (!table)
664 return -ENOMEM;
665
666 table->mask = count - 1;
667 for (i = 0; i < count; i++)
668 table->flows[i].cpu = RPS_NO_CPU;
669 } else
670 table = NULL;
671
672 spin_lock(&rps_dev_flow_lock);
673 old_table = queue->rps_flow_table;
674 rcu_assign_pointer(queue->rps_flow_table, table);
675 spin_unlock(&rps_dev_flow_lock);
676
677 if (old_table)
678 call_rcu(&old_table->rcu, rps_dev_flow_table_release);
679
680 return len;
681}
682
683static struct rx_queue_attribute rps_cpus_attribute =
684 __ATTR(rps_cpus, S_IRUGO | S_IWUSR, show_rps_map, store_rps_map);
685
686
687static struct rx_queue_attribute rps_dev_flow_table_cnt_attribute =
688 __ATTR(rps_flow_cnt, S_IRUGO | S_IWUSR,
689 show_rps_dev_flow_table_cnt, store_rps_dev_flow_table_cnt);
690
691static struct attribute *rx_queue_default_attrs[] = {
692 &rps_cpus_attribute.attr,
693 &rps_dev_flow_table_cnt_attribute.attr,
694 NULL
695};
696
697static void rx_queue_release(struct kobject *kobj)
698{
699 struct netdev_rx_queue *queue = to_rx_queue(kobj);
700 struct netdev_rx_queue *first = queue->first;
701
702 if (queue->rps_map)
703 call_rcu(&queue->rps_map->rcu, rps_map_release);
704
705 if (queue->rps_flow_table)
706 call_rcu(&queue->rps_flow_table->rcu,
707 rps_dev_flow_table_release);
708
709 if (atomic_dec_and_test(&first->count))
710 kfree(first);
711}
712
713static struct kobj_type rx_queue_ktype = {
714 .sysfs_ops = &rx_queue_sysfs_ops,
715 .release = rx_queue_release,
716 .default_attrs = rx_queue_default_attrs,
717};
718
719static int rx_queue_add_kobject(struct net_device *net, int index)
720{
721 struct netdev_rx_queue *queue = net->_rx + index;
722 struct kobject *kobj = &queue->kobj;
723 int error = 0;
724
725 kobj->kset = net->queues_kset;
726 error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
727 "rx-%u", index);
728 if (error) {
729 kobject_put(kobj);
730 return error;
731 }
732
733 kobject_uevent(kobj, KOBJ_ADD);
734
735 return error;
736}
737
738static int rx_queue_register_kobjects(struct net_device *net)
739{
740 int i;
741 int error = 0;
742
743 net->queues_kset = kset_create_and_add("queues",
744 NULL, &net->dev.kobj);
745 if (!net->queues_kset)
746 return -ENOMEM;
747 for (i = 0; i < net->num_rx_queues; i++) {
748 error = rx_queue_add_kobject(net, i);
749 if (error)
750 break;
751 }
752
753 if (error)
754 while (--i >= 0)
755 kobject_put(&net->_rx[i].kobj);
756
757 return error;
758}
759
760static void rx_queue_remove_kobjects(struct net_device *net)
761{
762 int i;
763
764 for (i = 0; i < net->num_rx_queues; i++)
765 kobject_put(&net->_rx[i].kobj);
766 kset_unregister(net->queues_kset);
767}
768#endif /* CONFIG_RPS */
470#endif /* CONFIG_SYSFS */ 769#endif /* CONFIG_SYSFS */
471 770
472#ifdef CONFIG_HOTPLUG 771#ifdef CONFIG_HOTPLUG
@@ -530,6 +829,10 @@ void netdev_unregister_kobject(struct net_device * net)
530 if (!net_eq(dev_net(net), &init_net)) 829 if (!net_eq(dev_net(net), &init_net))
531 return; 830 return;
532 831
832#ifdef CONFIG_RPS
833 rx_queue_remove_kobjects(net);
834#endif
835
533 device_del(dev); 836 device_del(dev);
534} 837}
535 838
@@ -538,6 +841,7 @@ int netdev_register_kobject(struct net_device *net)
538{ 841{
539 struct device *dev = &(net->dev); 842 struct device *dev = &(net->dev);
540 const struct attribute_group **groups = net->sysfs_groups; 843 const struct attribute_group **groups = net->sysfs_groups;
844 int error = 0;
541 845
542 dev->class = &net_class; 846 dev->class = &net_class;
543 dev->platform_data = net; 847 dev->platform_data = net;
@@ -564,7 +868,19 @@ int netdev_register_kobject(struct net_device *net)
564 if (!net_eq(dev_net(net), &init_net)) 868 if (!net_eq(dev_net(net), &init_net))
565 return 0; 869 return 0;
566 870
567 return device_add(dev); 871 error = device_add(dev);
872 if (error)
873 return error;
874
875#ifdef CONFIG_RPS
876 error = rx_queue_register_kobjects(net);
877 if (error) {
878 device_del(dev);
879 return error;
880 }
881#endif
882
883 return error;
568} 884}
569 885
570int netdev_class_create_file(struct class_attribute *class_attr) 886int netdev_class_create_file(struct class_attribute *class_attr)
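
The kobjects registered above appear as /sys/class/net/<dev>/queues/rx-<n>/ with rps_cpus and rps_flow_cnt attributes. A minimal user-space sketch that steers queue 0 of an assumed eth0 to CPUs 0 and 1 and sizes its flow table; the mask is hexadecimal with one bit per CPU, and store_rps_dev_flow_table_cnt() rounds the count up to a power of two:

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");
	int ok;

	if (!f)
		return -1;
	ok = (fputs(val, f) != EOF);
	return (fclose(f) == 0 && ok) ? 0 : -1;
}

int main(void)
{
	/* hex CPU mask, one bit per CPU: "3" = CPUs 0 and 1 */
	write_str("/sys/class/net/eth0/queues/rx-0/rps_cpus", "3");
	/* entries in this queue's flow table, rounded up to a power of two */
	write_str("/sys/class/net/eth0/queues/rx-0/rps_flow_cnt", "256");
	return 0;
}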
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 43923811bd6a..2ad68da418df 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -169,7 +169,7 @@
169#include <asm/dma.h> 169#include <asm/dma.h>
170#include <asm/div64.h> /* do_div */ 170#include <asm/div64.h> /* do_div */
171 171
172#define VERSION "2.72" 172#define VERSION "2.73"
173#define IP_NAME_SZ 32 173#define IP_NAME_SZ 32
174#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */ 174#define MAX_MPLS_LABELS 16 /* This is the max label stack depth */
175#define MPLS_STACK_BOTTOM htonl(0x00000100) 175#define MPLS_STACK_BOTTOM htonl(0x00000100)
@@ -190,6 +190,7 @@
190#define F_IPSEC_ON (1<<12) /* ipsec on for flows */ 190#define F_IPSEC_ON (1<<12) /* ipsec on for flows */
191#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */ 191#define F_QUEUE_MAP_RND (1<<13) /* queue map Random */
192#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */ 192#define F_QUEUE_MAP_CPU (1<<14) /* queue map mirrors smp_processor_id() */
193#define F_NODE (1<<15) /* Node memory alloc*/
193 194
194/* Thread control flag bits */ 195/* Thread control flag bits */
195#define T_STOP (1<<0) /* Stop run */ 196#define T_STOP (1<<0) /* Stop run */
@@ -372,6 +373,7 @@ struct pktgen_dev {
372 373
373 u16 queue_map_min; 374 u16 queue_map_min;
374 u16 queue_map_max; 375 u16 queue_map_max;
376 int node; /* Memory node */
375 377
376#ifdef CONFIG_XFRM 378#ifdef CONFIG_XFRM
377 __u8 ipsmode; /* IPSEC mode (config) */ 379 __u8 ipsmode; /* IPSEC mode (config) */
@@ -607,6 +609,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
607 if (pkt_dev->traffic_class) 609 if (pkt_dev->traffic_class)
608 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class); 610 seq_printf(seq, " traffic_class: 0x%02x\n", pkt_dev->traffic_class);
609 611
612 if (pkt_dev->node >= 0)
613 seq_printf(seq, " node: %d\n", pkt_dev->node);
614
610 seq_printf(seq, " Flags: "); 615 seq_printf(seq, " Flags: ");
611 616
612 if (pkt_dev->flags & F_IPV6) 617 if (pkt_dev->flags & F_IPV6)
@@ -660,6 +665,9 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
660 if (pkt_dev->flags & F_SVID_RND) 665 if (pkt_dev->flags & F_SVID_RND)
661 seq_printf(seq, "SVID_RND "); 666 seq_printf(seq, "SVID_RND ");
662 667
668 if (pkt_dev->flags & F_NODE)
669 seq_printf(seq, "NODE_ALLOC ");
670
663 seq_puts(seq, "\n"); 671 seq_puts(seq, "\n");
664 672
665 /* not really stopped, more like last-running-at */ 673 /* not really stopped, more like last-running-at */
@@ -1074,6 +1082,21 @@ static ssize_t pktgen_if_write(struct file *file,
1074 pkt_dev->dst_mac_count); 1082 pkt_dev->dst_mac_count);
1075 return count; 1083 return count;
1076 } 1084 }
1085 if (!strcmp(name, "node")) {
1086 len = num_arg(&user_buffer[i], 10, &value);
1087 if (len < 0)
1088 return len;
1089
1090 i += len;
1091
1092 if (node_possible(value)) {
1093 pkt_dev->node = value;
1094 sprintf(pg_result, "OK: node=%d", pkt_dev->node);
1095 }
1096 else
1097 sprintf(pg_result, "ERROR: node not possible");
1098 return count;
1099 }
1077 if (!strcmp(name, "flag")) { 1100 if (!strcmp(name, "flag")) {
1078 char f[32]; 1101 char f[32];
1079 memset(f, 0, 32); 1102 memset(f, 0, 32);
@@ -1166,12 +1189,18 @@ static ssize_t pktgen_if_write(struct file *file,
1166 else if (strcmp(f, "!IPV6") == 0) 1189 else if (strcmp(f, "!IPV6") == 0)
1167 pkt_dev->flags &= ~F_IPV6; 1190 pkt_dev->flags &= ~F_IPV6;
1168 1191
1192 else if (strcmp(f, "NODE_ALLOC") == 0)
1193 pkt_dev->flags |= F_NODE;
1194
1195 else if (strcmp(f, "!NODE_ALLOC") == 0)
1196 pkt_dev->flags &= ~F_NODE;
1197
1169 else { 1198 else {
1170 sprintf(pg_result, 1199 sprintf(pg_result,
1171 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s", 1200 "Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
1172 f, 1201 f,
1173 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, " 1202 "IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
1174 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC\n"); 1203 "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n");
1175 return count; 1204 return count;
1176 } 1205 }
1177 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags); 1206 sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
@@ -2572,9 +2601,27 @@ static struct sk_buff *fill_packet_ipv4(struct net_device *odev,
2572 mod_cur_headers(pkt_dev); 2601 mod_cur_headers(pkt_dev);
2573 2602
2574 datalen = (odev->hard_header_len + 16) & ~0xf; 2603 datalen = (odev->hard_header_len + 16) & ~0xf;
2575 skb = __netdev_alloc_skb(odev, 2604
2576 pkt_dev->cur_pkt_size + 64 2605 if (pkt_dev->flags & F_NODE) {
2577 + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT); 2606 int node;
2607
2608 if (pkt_dev->node >= 0)
2609 node = pkt_dev->node;
2610 else
2611 node = numa_node_id();
2612
2613 skb = __alloc_skb(NET_SKB_PAD + pkt_dev->cur_pkt_size + 64
2614 + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT, 0, node);
2615 if (likely(skb)) {
2616 skb_reserve(skb, NET_SKB_PAD);
2617 skb->dev = odev;
2618 }
2619 }
2620 else
2621 skb = __netdev_alloc_skb(odev,
2622 pkt_dev->cur_pkt_size + 64
2623 + datalen + pkt_dev->pkt_overhead, GFP_NOWAIT);
2624
2578 if (!skb) { 2625 if (!skb) {
2579 sprintf(pkt_dev->result, "No memory"); 2626 sprintf(pkt_dev->result, "No memory");
2580 return NULL; 2627 return NULL;
@@ -3674,6 +3721,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
3674 pkt_dev->svlan_p = 0; 3721 pkt_dev->svlan_p = 0;
3675 pkt_dev->svlan_cfi = 0; 3722 pkt_dev->svlan_cfi = 0;
3676 pkt_dev->svlan_id = 0xffff; 3723 pkt_dev->svlan_id = 0xffff;
3724 pkt_dev->node = -1;
3677 3725
3678 err = pktgen_setup_dev(pkt_dev, ifname); 3726 err = pktgen_setup_dev(pkt_dev, ifname);
3679 if (err) 3727 if (err)
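
With the new node parameter and NODE_ALLOC flag, pktgen can pin skb allocation to a chosen NUMA node through the usual one-command-per-write /proc/net/pktgen interface. A minimal sketch, assuming pktgen is loaded and eth0 is already bound to a pktgen thread:

#include <stdio.h>

static int pg_write(const char *path, const char *cmd)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(cmd, f);
	return fclose(f);
}

int main(void)
{
	const char *dev = "/proc/net/pktgen/eth0";

	pg_write(dev, "node 0\n");		/* must pass node_possible() */
	pg_write(dev, "flag NODE_ALLOC\n");	/* take the __alloc_skb() path */
	return 0;
}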
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 4568120d8533..78c85985cb30 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -118,7 +118,11 @@ static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
118{ 118{
119 struct rtnl_link *tab; 119 struct rtnl_link *tab;
120 120
121 tab = rtnl_msg_handlers[protocol]; 121 if (protocol < NPROTO)
122 tab = rtnl_msg_handlers[protocol];
123 else
124 tab = NULL;
125
122 if (tab == NULL || tab[msgindex].doit == NULL) 126 if (tab == NULL || tab[msgindex].doit == NULL)
123 tab = rtnl_msg_handlers[PF_UNSPEC]; 127 tab = rtnl_msg_handlers[PF_UNSPEC];
124 128
@@ -129,7 +133,11 @@ static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
129{ 133{
130 struct rtnl_link *tab; 134 struct rtnl_link *tab;
131 135
132 tab = rtnl_msg_handlers[protocol]; 136 if (protocol < NPROTO)
137 tab = rtnl_msg_handlers[protocol];
138 else
139 tab = NULL;
140
133 if (tab == NULL || tab[msgindex].dumpit == NULL) 141 if (tab == NULL || tab[msgindex].dumpit == NULL)
134 tab = rtnl_msg_handlers[PF_UNSPEC]; 142 tab = rtnl_msg_handlers[PF_UNSPEC];
135 143
@@ -600,7 +608,41 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
600 608
601 a->rx_compressed = b->rx_compressed; 609 a->rx_compressed = b->rx_compressed;
602 a->tx_compressed = b->tx_compressed; 610 a->tx_compressed = b->tx_compressed;
603}; 611}
612
613static void copy_rtnl_link_stats64(void *v, const struct net_device_stats *b)
614{
615 struct rtnl_link_stats64 a;
616
617 a.rx_packets = b->rx_packets;
618 a.tx_packets = b->tx_packets;
619 a.rx_bytes = b->rx_bytes;
620 a.tx_bytes = b->tx_bytes;
621 a.rx_errors = b->rx_errors;
622 a.tx_errors = b->tx_errors;
623 a.rx_dropped = b->rx_dropped;
624 a.tx_dropped = b->tx_dropped;
625
626 a.multicast = b->multicast;
627 a.collisions = b->collisions;
628
629 a.rx_length_errors = b->rx_length_errors;
630 a.rx_over_errors = b->rx_over_errors;
631 a.rx_crc_errors = b->rx_crc_errors;
632 a.rx_frame_errors = b->rx_frame_errors;
633 a.rx_fifo_errors = b->rx_fifo_errors;
634 a.rx_missed_errors = b->rx_missed_errors;
635
636 a.tx_aborted_errors = b->tx_aborted_errors;
637 a.tx_carrier_errors = b->tx_carrier_errors;
638 a.tx_fifo_errors = b->tx_fifo_errors;
639 a.tx_heartbeat_errors = b->tx_heartbeat_errors;
640 a.tx_window_errors = b->tx_window_errors;
641
642 a.rx_compressed = b->rx_compressed;
643 a.tx_compressed = b->tx_compressed;
644 memcpy(v, &a, sizeof(a));
645}
604 646
605static inline int rtnl_vfinfo_size(const struct net_device *dev) 647static inline int rtnl_vfinfo_size(const struct net_device *dev)
606{ 648{
@@ -619,6 +661,7 @@ static inline size_t if_nlmsg_size(const struct net_device *dev)
619 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */ 661 + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
620 + nla_total_size(sizeof(struct rtnl_link_ifmap)) 662 + nla_total_size(sizeof(struct rtnl_link_ifmap))
621 + nla_total_size(sizeof(struct rtnl_link_stats)) 663 + nla_total_size(sizeof(struct rtnl_link_stats))
664 + nla_total_size(sizeof(struct rtnl_link_stats64))
622 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */ 665 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
623 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */ 666 + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
624 + nla_total_size(4) /* IFLA_TXQLEN */ 667 + nla_total_size(4) /* IFLA_TXQLEN */
@@ -698,6 +741,12 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
698 stats = dev_get_stats(dev); 741 stats = dev_get_stats(dev);
699 copy_rtnl_link_stats(nla_data(attr), stats); 742 copy_rtnl_link_stats(nla_data(attr), stats);
700 743
744 attr = nla_reserve(skb, IFLA_STATS64,
745 sizeof(struct rtnl_link_stats64));
746 if (attr == NULL)
747 goto nla_put_failure;
748 copy_rtnl_link_stats64(nla_data(attr), stats);
749
701 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) { 750 if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent) {
702 int i; 751 int i;
703 struct ifla_vf_info ivi; 752 struct ifla_vf_info ivi;
@@ -1403,9 +1452,6 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
1403 return 0; 1452 return 0;
1404 1453
1405 family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family; 1454 family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
1406 if (family >= NPROTO)
1407 return -EAFNOSUPPORT;
1408
1409 sz_idx = type>>2; 1455 sz_idx = type>>2;
1410 kind = type&3; 1456 kind = type&3;
1411 1457
@@ -1473,6 +1519,7 @@ static int rtnetlink_event(struct notifier_block *this, unsigned long event, voi
1473 case NETDEV_POST_INIT: 1519 case NETDEV_POST_INIT:
1474 case NETDEV_REGISTER: 1520 case NETDEV_REGISTER:
1475 case NETDEV_CHANGE: 1521 case NETDEV_CHANGE:
1522 case NETDEV_PRE_TYPE_CHANGE:
1476 case NETDEV_GOING_DOWN: 1523 case NETDEV_GOING_DOWN:
1477 case NETDEV_UNREGISTER: 1524 case NETDEV_UNREGISTER:
1478 case NETDEV_UNREGISTER_BATCH: 1525 case NETDEV_UNREGISTER_BATCH:
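
Note that copy_rtnl_link_stats64() fills a struct on the stack and memcpy()s it into the attribute reserved with nla_reserve() rather than casting nla_data() to struct rtnl_link_stats64 *. The likely reason, stated here as an assumption, is that a netlink attribute payload is only guaranteed 4-byte alignment, so direct stores through a 64-bit struct pointer could be unaligned. A minimal sketch of the same fill-then-memcpy pattern:

#include <stdint.h>
#include <string.h>

struct stats64 {		/* stand-in for struct rtnl_link_stats64 */
	uint64_t rx_packets;
	uint64_t tx_packets;
};

static void fill_stats64(void *dst, uint64_t rx, uint64_t tx)
{
	struct stats64 s = { .rx_packets = rx, .tx_packets = tx };

	memcpy(dst, &s, sizeof(s));	/* no direct unaligned 64-bit stores */
}

int main(void)
{
	unsigned char buf[4 + sizeof(struct stats64)];

	fill_stats64(buf + 4, 1, 2);	/* deliberately 4-byte-offset target */
	return 0;
}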
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 93c4e060c91e..4218ff49bf13 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -117,7 +117,7 @@ static const struct pipe_buf_operations sock_pipe_buf_ops = {
117 * 117 *
118 * Out of line support code for skb_put(). Not user callable. 118 * Out of line support code for skb_put(). Not user callable.
119 */ 119 */
120void skb_over_panic(struct sk_buff *skb, int sz, void *here) 120static void skb_over_panic(struct sk_buff *skb, int sz, void *here)
121{ 121{
122 printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p " 122 printk(KERN_EMERG "skb_over_panic: text:%p len:%d put:%d head:%p "
123 "data:%p tail:%#lx end:%#lx dev:%s\n", 123 "data:%p tail:%#lx end:%#lx dev:%s\n",
@@ -126,7 +126,6 @@ void skb_over_panic(struct sk_buff *skb, int sz, void *here)
126 skb->dev ? skb->dev->name : "<NULL>"); 126 skb->dev ? skb->dev->name : "<NULL>");
127 BUG(); 127 BUG();
128} 128}
129EXPORT_SYMBOL(skb_over_panic);
130 129
131/** 130/**
132 * skb_under_panic - private function 131 * skb_under_panic - private function
@@ -137,7 +136,7 @@ EXPORT_SYMBOL(skb_over_panic);
137 * Out of line support code for skb_push(). Not user callable. 136 * Out of line support code for skb_push(). Not user callable.
138 */ 137 */
139 138
140void skb_under_panic(struct sk_buff *skb, int sz, void *here) 139static void skb_under_panic(struct sk_buff *skb, int sz, void *here)
141{ 140{
142 printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p " 141 printk(KERN_EMERG "skb_under_panic: text:%p len:%d put:%d head:%p "
143 "data:%p tail:%#lx end:%#lx dev:%s\n", 142 "data:%p tail:%#lx end:%#lx dev:%s\n",
@@ -146,7 +145,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
146 skb->dev ? skb->dev->name : "<NULL>"); 145 skb->dev ? skb->dev->name : "<NULL>");
147 BUG(); 146 BUG();
148} 147}
149EXPORT_SYMBOL(skb_under_panic);
150 148
151/* Allocate a new skbuff. We do this ourselves so we can fill in a few 149/* Allocate a new skbuff. We do this ourselves so we can fill in a few
152 * 'private' fields and also do memory statistics to find all the 150 * 'private' fields and also do memory statistics to find all the
@@ -534,6 +532,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
534 new->network_header = old->network_header; 532 new->network_header = old->network_header;
535 new->mac_header = old->mac_header; 533 new->mac_header = old->mac_header;
536 skb_dst_set(new, dst_clone(skb_dst(old))); 534 skb_dst_set(new, dst_clone(skb_dst(old)));
535 new->rxhash = old->rxhash;
537#ifdef CONFIG_XFRM 536#ifdef CONFIG_XFRM
538 new->sp = secpath_get(old->sp); 537 new->sp = secpath_get(old->sp);
539#endif 538#endif
@@ -581,6 +580,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
581 C(len); 580 C(len);
582 C(data_len); 581 C(data_len);
583 C(mac_len); 582 C(mac_len);
583 C(rxhash);
584 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len; 584 n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
585 n->cloned = 1; 585 n->cloned = 1;
586 n->nohdr = 0; 586 n->nohdr = 0;
diff --git a/net/core/sock.c b/net/core/sock.c
index c5812bbc2cc9..58ebd146ce5a 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -364,11 +364,11 @@ EXPORT_SYMBOL(sk_reset_txq);
364 364
365struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie) 365struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
366{ 366{
367 struct dst_entry *dst = sk->sk_dst_cache; 367 struct dst_entry *dst = __sk_dst_get(sk);
368 368
369 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 369 if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
370 sk_tx_queue_clear(sk); 370 sk_tx_queue_clear(sk);
371 sk->sk_dst_cache = NULL; 371 rcu_assign_pointer(sk->sk_dst_cache, NULL);
372 dst_release(dst); 372 dst_release(dst);
373 return NULL; 373 return NULL;
374 } 374 }
@@ -1157,7 +1157,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
1157 skb_queue_head_init(&newsk->sk_async_wait_queue); 1157 skb_queue_head_init(&newsk->sk_async_wait_queue);
1158#endif 1158#endif
1159 1159
1160 rwlock_init(&newsk->sk_dst_lock); 1160 spin_lock_init(&newsk->sk_dst_lock);
1161 rwlock_init(&newsk->sk_callback_lock); 1161 rwlock_init(&newsk->sk_callback_lock);
1162 lockdep_set_class_and_name(&newsk->sk_callback_lock, 1162 lockdep_set_class_and_name(&newsk->sk_callback_lock,
1163 af_callback_keys + newsk->sk_family, 1163 af_callback_keys + newsk->sk_family,
@@ -1395,7 +1395,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
1395 if (signal_pending(current)) 1395 if (signal_pending(current))
1396 break; 1396 break;
1397 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); 1397 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1398 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1398 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1399 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) 1399 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
1400 break; 1400 break;
1401 if (sk->sk_shutdown & SEND_SHUTDOWN) 1401 if (sk->sk_shutdown & SEND_SHUTDOWN)
@@ -1404,7 +1404,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
1404 break; 1404 break;
1405 timeo = schedule_timeout(timeo); 1405 timeo = schedule_timeout(timeo);
1406 } 1406 }
1407 finish_wait(sk->sk_sleep, &wait); 1407 finish_wait(sk_sleep(sk), &wait);
1408 return timeo; 1408 return timeo;
1409} 1409}
1410 1410
@@ -1570,11 +1570,11 @@ int sk_wait_data(struct sock *sk, long *timeo)
1570 int rc; 1570 int rc;
1571 DEFINE_WAIT(wait); 1571 DEFINE_WAIT(wait);
1572 1572
1573 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1573 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1574 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1574 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1575 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); 1575 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
1576 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1576 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1577 finish_wait(sk->sk_sleep, &wait); 1577 finish_wait(sk_sleep(sk), &wait);
1578 return rc; 1578 return rc;
1579} 1579}
1580EXPORT_SYMBOL(sk_wait_data); 1580EXPORT_SYMBOL(sk_wait_data);
@@ -1798,7 +1798,7 @@ static void sock_def_wakeup(struct sock *sk)
1798{ 1798{
1799 read_lock(&sk->sk_callback_lock); 1799 read_lock(&sk->sk_callback_lock);
1800 if (sk_has_sleeper(sk)) 1800 if (sk_has_sleeper(sk))
1801 wake_up_interruptible_all(sk->sk_sleep); 1801 wake_up_interruptible_all(sk_sleep(sk));
1802 read_unlock(&sk->sk_callback_lock); 1802 read_unlock(&sk->sk_callback_lock);
1803} 1803}
1804 1804
@@ -1806,7 +1806,7 @@ static void sock_def_error_report(struct sock *sk)
1806{ 1806{
1807 read_lock(&sk->sk_callback_lock); 1807 read_lock(&sk->sk_callback_lock);
1808 if (sk_has_sleeper(sk)) 1808 if (sk_has_sleeper(sk))
1809 wake_up_interruptible_poll(sk->sk_sleep, POLLERR); 1809 wake_up_interruptible_poll(sk_sleep(sk), POLLERR);
1810 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR); 1810 sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
1811 read_unlock(&sk->sk_callback_lock); 1811 read_unlock(&sk->sk_callback_lock);
1812} 1812}
@@ -1815,7 +1815,7 @@ static void sock_def_readable(struct sock *sk, int len)
1815{ 1815{
1816 read_lock(&sk->sk_callback_lock); 1816 read_lock(&sk->sk_callback_lock);
1817 if (sk_has_sleeper(sk)) 1817 if (sk_has_sleeper(sk))
1818 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLIN | 1818 wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN |
1819 POLLRDNORM | POLLRDBAND); 1819 POLLRDNORM | POLLRDBAND);
1820 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN); 1820 sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
1821 read_unlock(&sk->sk_callback_lock); 1821 read_unlock(&sk->sk_callback_lock);
@@ -1830,7 +1830,7 @@ static void sock_def_write_space(struct sock *sk)
1830 */ 1830 */
1831 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) { 1831 if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
1832 if (sk_has_sleeper(sk)) 1832 if (sk_has_sleeper(sk))
1833 wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT | 1833 wake_up_interruptible_sync_poll(sk_sleep(sk), POLLOUT |
1834 POLLWRNORM | POLLWRBAND); 1834 POLLWRNORM | POLLWRBAND);
1835 1835
1836 /* Should agree with poll, otherwise some programs break */ 1836 /* Should agree with poll, otherwise some programs break */
@@ -1898,7 +1898,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
1898 } else 1898 } else
1899 sk->sk_sleep = NULL; 1899 sk->sk_sleep = NULL;
1900 1900
1901 rwlock_init(&sk->sk_dst_lock); 1901 spin_lock_init(&sk->sk_dst_lock);
1902 rwlock_init(&sk->sk_callback_lock); 1902 rwlock_init(&sk->sk_callback_lock);
1903 lockdep_set_class_and_name(&sk->sk_callback_lock, 1903 lockdep_set_class_and_name(&sk->sk_callback_lock,
1904 af_callback_keys + sk->sk_family, 1904 af_callback_keys + sk->sk_family,
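
The sk->sk_sleep dereferences above (and throughout the files that follow) are all funneled through a new sk_sleep() accessor. A minimal sketch of it, assuming it is still a trivial wrapper at this point in the series:

    static inline wait_queue_head_t *sk_sleep(struct sock *sk)
    {
    	return sk->sk_sleep;	/* pure indirection for now; it lets a
    				 * later patch relocate the wait queue
    				 * without touching every caller again */
    }
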
diff --git a/net/core/stream.c b/net/core/stream.c
index a37debfeb1b2..7b3c3f30b107 100644
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -32,8 +32,8 @@ void sk_stream_write_space(struct sock *sk)
32 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) { 32 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
33 clear_bit(SOCK_NOSPACE, &sock->flags); 33 clear_bit(SOCK_NOSPACE, &sock->flags);
34 34
35 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 35 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
36 wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | 36 wake_up_interruptible_poll(sk_sleep(sk), POLLOUT |
37 POLLWRNORM | POLLWRBAND); 37 POLLWRNORM | POLLWRBAND);
38 if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN)) 38 if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
39 sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT); 39 sock_wake_async(sock, SOCK_WAKE_SPACE, POLL_OUT);
@@ -66,13 +66,13 @@ int sk_stream_wait_connect(struct sock *sk, long *timeo_p)
66 if (signal_pending(tsk)) 66 if (signal_pending(tsk))
67 return sock_intr_errno(*timeo_p); 67 return sock_intr_errno(*timeo_p);
68 68
69 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 69 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
70 sk->sk_write_pending++; 70 sk->sk_write_pending++;
71 done = sk_wait_event(sk, timeo_p, 71 done = sk_wait_event(sk, timeo_p,
72 !sk->sk_err && 72 !sk->sk_err &&
73 !((1 << sk->sk_state) & 73 !((1 << sk->sk_state) &
74 ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))); 74 ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)));
75 finish_wait(sk->sk_sleep, &wait); 75 finish_wait(sk_sleep(sk), &wait);
76 sk->sk_write_pending--; 76 sk->sk_write_pending--;
77 } while (!done); 77 } while (!done);
78 return 0; 78 return 0;
@@ -96,13 +96,13 @@ void sk_stream_wait_close(struct sock *sk, long timeout)
96 DEFINE_WAIT(wait); 96 DEFINE_WAIT(wait);
97 97
98 do { 98 do {
99 prepare_to_wait(sk->sk_sleep, &wait, 99 prepare_to_wait(sk_sleep(sk), &wait,
100 TASK_INTERRUPTIBLE); 100 TASK_INTERRUPTIBLE);
101 if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk))) 101 if (sk_wait_event(sk, &timeout, !sk_stream_closing(sk)))
102 break; 102 break;
103 } while (!signal_pending(current) && timeout); 103 } while (!signal_pending(current) && timeout);
104 104
105 finish_wait(sk->sk_sleep, &wait); 105 finish_wait(sk_sleep(sk), &wait);
106 } 106 }
107} 107}
108 108
@@ -126,7 +126,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
126 while (1) { 126 while (1) {
127 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); 127 set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
128 128
129 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 129 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
130 130
131 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) 131 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
132 goto do_error; 132 goto do_error;
@@ -157,7 +157,7 @@ int sk_stream_wait_memory(struct sock *sk, long *timeo_p)
157 *timeo_p = current_timeo; 157 *timeo_p = current_timeo;
158 } 158 }
159out: 159out:
160 finish_wait(sk->sk_sleep, &wait); 160 finish_wait(sk_sleep(sk), &wait);
161 return err; 161 return err;
162 162
163do_error: 163do_error:
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index b7b6b8208f75..dcc7d25996ab 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -11,12 +11,72 @@
11#include <linux/socket.h> 11#include <linux/socket.h>
12#include <linux/netdevice.h> 12#include <linux/netdevice.h>
13#include <linux/ratelimit.h> 13#include <linux/ratelimit.h>
14#include <linux/vmalloc.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/slab.h> 16#include <linux/slab.h>
16 17
17#include <net/ip.h> 18#include <net/ip.h>
18#include <net/sock.h> 19#include <net/sock.h>
19 20
21#ifdef CONFIG_RPS
22static int rps_sock_flow_sysctl(ctl_table *table, int write,
23 void __user *buffer, size_t *lenp, loff_t *ppos)
24{
25 unsigned int orig_size, size;
26 int ret, i;
27 ctl_table tmp = {
28 .data = &size,
29 .maxlen = sizeof(size),
30 .mode = table->mode
31 };
32 struct rps_sock_flow_table *orig_sock_table, *sock_table;
33 static DEFINE_MUTEX(sock_flow_mutex);
34
35 mutex_lock(&sock_flow_mutex);
36
37 orig_sock_table = rps_sock_flow_table;
38 size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
39
40 ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
41
42 if (write) {
43 if (size) {
44 if (size > 1<<30) {
45 /* Enforce limit to prevent overflow */
46 mutex_unlock(&sock_flow_mutex);
47 return -EINVAL;
48 }
49 size = roundup_pow_of_two(size);
50 if (size != orig_size) {
51 sock_table =
52 vmalloc(RPS_SOCK_FLOW_TABLE_SIZE(size));
53 if (!sock_table) {
54 mutex_unlock(&sock_flow_mutex);
55 return -ENOMEM;
56 }
57
58 sock_table->mask = size - 1;
59 } else
60 sock_table = orig_sock_table;
61
62 for (i = 0; i < size; i++)
63 sock_table->ents[i] = RPS_NO_CPU;
64 } else
65 sock_table = NULL;
66
67 if (sock_table != orig_sock_table) {
68 rcu_assign_pointer(rps_sock_flow_table, sock_table);
69 synchronize_rcu();
70 vfree(orig_sock_table);
71 }
72 }
73
74 mutex_unlock(&sock_flow_mutex);
75
76 return ret;
77}
78#endif /* CONFIG_RPS */
79
20static struct ctl_table net_core_table[] = { 80static struct ctl_table net_core_table[] = {
21#ifdef CONFIG_NET 81#ifdef CONFIG_NET
22 { 82 {
@@ -82,6 +142,14 @@ static struct ctl_table net_core_table[] = {
82 .mode = 0644, 142 .mode = 0644,
83 .proc_handler = proc_dointvec 143 .proc_handler = proc_dointvec
84 }, 144 },
145#ifdef CONFIG_RPS
146 {
147 .procname = "rps_sock_flow_entries",
148 .maxlen = sizeof(int),
149 .mode = 0644,
150 .proc_handler = rps_sock_flow_sysctl
151 },
152#endif
85#endif /* CONFIG_NET */ 153#endif /* CONFIG_NET */
86 { 154 {
87 .procname = "netdev_budget", 155 .procname = "netdev_budget",
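
The rps_sock_flow_sysctl() handler above sizes the flow table as a power of two and publishes the replacement with rcu_assign_pointer()/synchronize_rcu() before vfree()ing the old one, so lockless readers never see a half-torn table. An administrator resizes it by writing an entry count, e.g. "echo 32768 > /proc/sys/net/core/rps_sock_flow_entries". The power-of-two requirement exists because lookups index the table with a mask instead of a modulo; a sketch of the consumer side:

    /* with size == 2^n and mask == size - 1, "& mask" is a cheap modulo */
    unsigned int slot = hash & sock_table->mask;
    sock_table->ents[slot] = raw_smp_processor_id();
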
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index bcd7632299f5..d3235899c7e3 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -208,7 +208,7 @@ static void ccid3_hc_tx_no_feedback_timer(unsigned long data)
208 goto restart_timer; 208 goto restart_timer;
209 } 209 }
210 210
211 ccid3_pr_debug("%s(%p, state=%s) - entry \n", dccp_role(sk), sk, 211 ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk,
212 ccid3_tx_state_name(hc->tx_state)); 212 ccid3_tx_state_name(hc->tx_state));
213 213
214 if (hc->tx_state == TFRC_SSTATE_FBACK) 214 if (hc->tx_state == TFRC_SSTATE_FBACK)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 5ef32c2f0d6a..a10a61a1ded2 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -189,7 +189,7 @@ enum {
189#define DCCP_MIB_MAX __DCCP_MIB_MAX 189#define DCCP_MIB_MAX __DCCP_MIB_MAX
190struct dccp_mib { 190struct dccp_mib {
191 unsigned long mibs[DCCP_MIB_MAX]; 191 unsigned long mibs[DCCP_MIB_MAX];
192} __SNMP_MIB_ALIGN__; 192};
193 193
194DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics); 194DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
195#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field) 195#define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field)
@@ -223,7 +223,7 @@ static inline void dccp_csum_outgoing(struct sk_buff *skb)
223 skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0); 223 skb->csum = skb_checksum(skb, 0, (cov > skb->len)? skb->len : cov, 0);
224} 224}
225 225
226extern void dccp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb); 226extern void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb);
227 227
228extern int dccp_retransmit_skb(struct sock *sk); 228extern int dccp_retransmit_skb(struct sock *sk);
229 229
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 9ec717426024..58f7bc156850 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -415,7 +415,7 @@ static int dccp_rcv_request_sent_state_process(struct sock *sk,
415 if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq, 415 if (!between48(DCCP_SKB_CB(skb)->dccpd_ack_seq,
416 dp->dccps_awl, dp->dccps_awh)) { 416 dp->dccps_awl, dp->dccps_awh)) {
417 dccp_pr_debug("invalid ackno: S.AWL=%llu, " 417 dccp_pr_debug("invalid ackno: S.AWL=%llu, "
418 "P.ackno=%llu, S.AWH=%llu \n", 418 "P.ackno=%llu, S.AWH=%llu\n",
419 (unsigned long long)dp->dccps_awl, 419 (unsigned long long)dp->dccps_awl,
420 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq, 420 (unsigned long long)DCCP_SKB_CB(skb)->dccpd_ack_seq,
421 (unsigned long long)dp->dccps_awh); 421 (unsigned long long)dp->dccps_awh);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 52ffa1cde15a..d9b11ef8694c 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -349,7 +349,7 @@ static inline __sum16 dccp_v4_csum_finish(struct sk_buff *skb,
349 return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum); 349 return csum_tcpudp_magic(src, dst, skb->len, IPPROTO_DCCP, skb->csum);
350} 350}
351 351
352void dccp_v4_send_check(struct sock *sk, int unused, struct sk_buff *skb) 352void dccp_v4_send_check(struct sock *sk, struct sk_buff *skb)
353{ 353{
354 const struct inet_sock *inet = inet_sk(sk); 354 const struct inet_sock *inet = inet_sk(sk);
355 struct dccp_hdr *dh = dccp_hdr(skb); 355 struct dccp_hdr *dh = dccp_hdr(skb);
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 3b11e41a2929..091698899594 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -60,8 +60,7 @@ static inline __sum16 dccp_v6_csum_finish(struct sk_buff *skb,
60 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum); 60 return csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_DCCP, skb->csum);
61} 61}
62 62
63static inline void dccp_v6_send_check(struct sock *sk, int unused_value, 63static inline void dccp_v6_send_check(struct sock *sk, struct sk_buff *skb)
64 struct sk_buff *skb)
65{ 64{
66 struct ipv6_pinfo *np = inet6_sk(sk); 65 struct ipv6_pinfo *np = inet6_sk(sk);
67 struct dccp_hdr *dh = dccp_hdr(skb); 66 struct dccp_hdr *dh = dccp_hdr(skb);
@@ -293,7 +292,7 @@ static int dccp_v6_send_response(struct sock *sk, struct request_sock *req,
293 &ireq6->loc_addr, 292 &ireq6->loc_addr,
294 &ireq6->rmt_addr); 293 &ireq6->rmt_addr);
295 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr); 294 ipv6_addr_copy(&fl.fl6_dst, &ireq6->rmt_addr);
296 err = ip6_xmit(sk, skb, &fl, opt, 0); 295 err = ip6_xmit(sk, skb, &fl, opt);
297 err = net_xmit_eval(err); 296 err = net_xmit_eval(err);
298 } 297 }
299 298
@@ -348,7 +347,7 @@ static void dccp_v6_ctl_send_reset(struct sock *sk, struct sk_buff *rxskb)
348 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { 347 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
349 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { 348 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
350 skb_dst_set(skb, dst); 349 skb_dst_set(skb, dst);
351 ip6_xmit(ctl_sk, skb, &fl, NULL, 0); 350 ip6_xmit(ctl_sk, skb, &fl, NULL);
352 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS); 351 DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
353 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS); 352 DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
354 return; 353 return;
diff --git a/net/dccp/output.c b/net/dccp/output.c
index fc3f436440b4..2d3dcb39851f 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -129,14 +129,14 @@ static int dccp_transmit_skb(struct sock *sk, struct sk_buff *skb)
129 break; 129 break;
130 } 130 }
131 131
132 icsk->icsk_af_ops->send_check(sk, 0, skb); 132 icsk->icsk_af_ops->send_check(sk, skb);
133 133
134 if (set_ack) 134 if (set_ack)
135 dccp_event_ack_sent(sk); 135 dccp_event_ack_sent(sk);
136 136
137 DCCP_INC_STATS(DCCP_MIB_OUTSEGS); 137 DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
138 138
139 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 139 err = icsk->icsk_af_ops->queue_xmit(skb);
140 return net_xmit_eval(err); 140 return net_xmit_eval(err);
141 } 141 }
142 return -ENOBUFS; 142 return -ENOBUFS;
@@ -198,7 +198,7 @@ void dccp_write_space(struct sock *sk)
198 read_lock(&sk->sk_callback_lock); 198 read_lock(&sk->sk_callback_lock);
199 199
200 if (sk_has_sleeper(sk)) 200 if (sk_has_sleeper(sk))
201 wake_up_interruptible(sk->sk_sleep); 201 wake_up_interruptible(sk_sleep(sk));
202 /* Should agree with poll, otherwise some programs break */ 202 /* Should agree with poll, otherwise some programs break */
203 if (sock_writeable(sk)) 203 if (sock_writeable(sk))
204 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 204 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
@@ -225,7 +225,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
225 dccp_pr_debug("delayed send by %d msec\n", delay); 225 dccp_pr_debug("delayed send by %d msec\n", delay);
226 jiffdelay = msecs_to_jiffies(delay); 226 jiffdelay = msecs_to_jiffies(delay);
227 227
228 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 228 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
229 229
230 sk->sk_write_pending++; 230 sk->sk_write_pending++;
231 release_sock(sk); 231 release_sock(sk);
@@ -241,7 +241,7 @@ static int dccp_wait_for_ccid(struct sock *sk, struct sk_buff *skb, int delay)
241 rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb); 241 rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
242 } while ((delay = rc) > 0); 242 } while ((delay = rc) > 0);
243out: 243out:
244 finish_wait(sk->sk_sleep, &wait); 244 finish_wait(sk_sleep(sk), &wait);
245 return rc; 245 return rc;
246 246
247do_error: 247do_error:
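
The two calls in dccp_transmit_skb() above track a signature cleanup in the connection-socket ops: the always-unused length argument is dropped from ->send_check() and the unused flag from ->queue_xmit(). A sketch of the affected members after this series (assuming they live in inet_connection_sock_af_ops, as the icsk usage suggests):

    struct inet_connection_sock_af_ops {
    	int	(*queue_xmit)(struct sk_buff *skb);
    	void	(*send_check)(struct sock *sk, struct sk_buff *skb);
    	/* ... remaining ops unchanged ... */
    };
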
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index a0e38d8018f5..b03ecf6b2bb0 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -312,7 +312,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
312 unsigned int mask; 312 unsigned int mask;
313 struct sock *sk = sock->sk; 313 struct sock *sk = sock->sk;
314 314
315 sock_poll_wait(file, sk->sk_sleep, wait); 315 sock_poll_wait(file, sk_sleep(sk), wait);
316 if (sk->sk_state == DCCP_LISTEN) 316 if (sk->sk_state == DCCP_LISTEN)
317 return inet_csk_listen_poll(sk); 317 return inet_csk_listen_poll(sk);
318 318
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index bbfeb5eae46a..1a9aa05d4dc4 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -38,7 +38,7 @@ static int dccp_write_timeout(struct sock *sk)
38 38
39 if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) { 39 if (sk->sk_state == DCCP_REQUESTING || sk->sk_state == DCCP_PARTOPEN) {
40 if (icsk->icsk_retransmits != 0) 40 if (icsk->icsk_retransmits != 0)
41 dst_negative_advice(&sk->sk_dst_cache, sk); 41 dst_negative_advice(sk);
42 retry_until = icsk->icsk_syn_retries ? 42 retry_until = icsk->icsk_syn_retries ?
43 : sysctl_dccp_request_retries; 43 : sysctl_dccp_request_retries;
44 } else { 44 } else {
@@ -63,7 +63,7 @@ static int dccp_write_timeout(struct sock *sk)
63 Golden words :-). 63 Golden words :-).
64 */ 64 */
65 65
66 dst_negative_advice(&sk->sk_dst_cache, sk); 66 dst_negative_advice(sk);
67 } 67 }
68 68
69 retry_until = sysctl_dccp_retries2; 69 retry_until = sysctl_dccp_retries2;
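
dst_negative_advice() now takes the socket itself rather than a bare pointer-to-pointer, so it can update the cached route under the new RCU rules. A simplified sketch of the reworked helper the timer code calls:

    static inline void dst_negative_advice(struct sock *sk)
    {
    	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

    	if (dst && dst->ops->negative_advice) {
    		ndst = dst->ops->negative_advice(dst);
    		if (ndst != dst) {
    			rcu_assign_pointer(sk->sk_dst_cache, ndst);
    			sk_tx_queue_clear(sk);
    		}
    	}
    }
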
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2b494fac9468..d6b93d19790f 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -446,7 +446,7 @@ static void dn_destruct(struct sock *sk)
446 skb_queue_purge(&scp->other_xmit_queue); 446 skb_queue_purge(&scp->other_xmit_queue);
447 skb_queue_purge(&scp->other_receive_queue); 447 skb_queue_purge(&scp->other_receive_queue);
448 448
449 dst_release(xchg(&sk->sk_dst_cache, NULL)); 449 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
450} 450}
451 451
452static int dn_memory_pressure; 452static int dn_memory_pressure;
@@ -832,7 +832,7 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
832 scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS); 832 scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
833 dn_send_conn_conf(sk, allocation); 833 dn_send_conn_conf(sk, allocation);
834 834
835 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 835 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
836 for(;;) { 836 for(;;) {
837 release_sock(sk); 837 release_sock(sk);
838 if (scp->state == DN_CC) 838 if (scp->state == DN_CC)
@@ -850,9 +850,9 @@ static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
850 err = -EAGAIN; 850 err = -EAGAIN;
851 if (!*timeo) 851 if (!*timeo)
852 break; 852 break;
853 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 853 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
854 } 854 }
855 finish_wait(sk->sk_sleep, &wait); 855 finish_wait(sk_sleep(sk), &wait);
856 if (err == 0) { 856 if (err == 0) {
857 sk->sk_socket->state = SS_CONNECTED; 857 sk->sk_socket->state = SS_CONNECTED;
858 } else if (scp->state != DN_CC) { 858 } else if (scp->state != DN_CC) {
@@ -873,7 +873,7 @@ static int dn_wait_run(struct sock *sk, long *timeo)
873 if (!*timeo) 873 if (!*timeo)
874 return -EALREADY; 874 return -EALREADY;
875 875
876 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 876 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
877 for(;;) { 877 for(;;) {
878 release_sock(sk); 878 release_sock(sk);
879 if (scp->state == DN_CI || scp->state == DN_CC) 879 if (scp->state == DN_CI || scp->state == DN_CC)
@@ -891,9 +891,9 @@ static int dn_wait_run(struct sock *sk, long *timeo)
891 err = -ETIMEDOUT; 891 err = -ETIMEDOUT;
892 if (!*timeo) 892 if (!*timeo)
893 break; 893 break;
894 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 894 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
895 } 895 }
896 finish_wait(sk->sk_sleep, &wait); 896 finish_wait(sk_sleep(sk), &wait);
897out: 897out:
898 if (err == 0) { 898 if (err == 0) {
899 sk->sk_socket->state = SS_CONNECTED; 899 sk->sk_socket->state = SS_CONNECTED;
@@ -1040,7 +1040,7 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1040 struct sk_buff *skb = NULL; 1040 struct sk_buff *skb = NULL;
1041 int err = 0; 1041 int err = 0;
1042 1042
1043 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1043 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1044 for(;;) { 1044 for(;;) {
1045 release_sock(sk); 1045 release_sock(sk);
1046 skb = skb_dequeue(&sk->sk_receive_queue); 1046 skb = skb_dequeue(&sk->sk_receive_queue);
@@ -1060,9 +1060,9 @@ static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
1060 err = -EAGAIN; 1060 err = -EAGAIN;
1061 if (!*timeo) 1061 if (!*timeo)
1062 break; 1062 break;
1063 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1063 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1064 } 1064 }
1065 finish_wait(sk->sk_sleep, &wait); 1065 finish_wait(sk_sleep(sk), &wait);
1066 1066
1067 return skb == NULL ? ERR_PTR(err) : skb; 1067 return skb == NULL ? ERR_PTR(err) : skb;
1068} 1068}
@@ -1105,7 +1105,7 @@ static int dn_accept(struct socket *sock, struct socket *newsock, int flags)
1105 release_sock(sk); 1105 release_sock(sk);
1106 1106
1107 dst = skb_dst(skb); 1107 dst = skb_dst(skb);
1108 dst_release(xchg(&newsk->sk_dst_cache, dst)); 1108 sk_dst_set(newsk, dst);
1109 skb_dst_set(skb, NULL); 1109 skb_dst_set(skb, NULL);
1110 1110
1111 DN_SK(newsk)->state = DN_CR; 1111 DN_SK(newsk)->state = DN_CR;
@@ -1746,11 +1746,11 @@ static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
1746 goto out; 1746 goto out;
1747 } 1747 }
1748 1748
1749 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1749 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1750 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1750 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1751 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target)); 1751 sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
1752 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1752 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1753 finish_wait(sk->sk_sleep, &wait); 1753 finish_wait(sk_sleep(sk), &wait);
1754 } 1754 }
1755 1755
1756 skb_queue_walk_safe(queue, skb, n) { 1756 skb_queue_walk_safe(queue, skb, n) {
@@ -1956,7 +1956,7 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
1956 } 1956 }
1957 1957
1958 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache) 1958 if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
1959 dst_negative_advice(&sk->sk_dst_cache, sk); 1959 dst_negative_advice(sk);
1960 1960
1961 mss = scp->segsize_rem; 1961 mss = scp->segsize_rem;
1962 fctype = scp->services_rem & NSP_FC_MASK; 1962 fctype = scp->services_rem & NSP_FC_MASK;
@@ -2003,12 +2003,12 @@ static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
2003 goto out; 2003 goto out;
2004 } 2004 }
2005 2005
2006 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 2006 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
2007 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2007 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2008 sk_wait_event(sk, &timeo, 2008 sk_wait_event(sk, &timeo,
2009 !dn_queue_too_long(scp, queue, flags)); 2009 !dn_queue_too_long(scp, queue, flags));
2010 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 2010 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
2011 finish_wait(sk->sk_sleep, &wait); 2011 finish_wait(sk_sleep(sk), &wait);
2012 continue; 2012 continue;
2013 } 2013 }
2014 2014
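
dn_destruct() and dn_accept() above follow the same sk_dst_cache RCUification seen in net/core/sock.c: readers go through __sk_dst_get() under RCU (or with the socket owned), and writers publish with rcu_assign_pointer() under sk_dst_lock, which is now a spinlock. A sketch of the reader/writer pair, simplified from the new helpers:

    static inline struct dst_entry *__sk_dst_get(struct sock *sk)
    {
    	return rcu_dereference_check(sk->sk_dst_cache,
    				     sock_owned_by_user(sk));
    }

    static void sk_dst_set(struct sock *sk, struct dst_entry *dst)
    {
    	struct dst_entry *old_dst;

    	spin_lock(&sk->sk_dst_lock);		/* writers serialize */
    	old_dst = sk->sk_dst_cache;
    	rcu_assign_pointer(sk->sk_dst_cache, dst);
    	spin_unlock(&sk->sk_dst_lock);
    	dst_release(old_dst);	/* refcounted, so safe to drop here */
    }
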
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index cead68eb254c..615dbe3b43f9 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -350,7 +350,7 @@ static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int de
350 if (dn_db->dev->type == ARPHRD_ETHER) { 350 if (dn_db->dev->type == ARPHRD_ETHER) {
351 if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) { 351 if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) {
352 dn_dn2eth(mac_addr, ifa1->ifa_local); 352 dn_dn2eth(mac_addr, ifa1->ifa_local);
353 dev_mc_delete(dev, mac_addr, ETH_ALEN, 0); 353 dev_mc_del(dev, mac_addr);
354 } 354 }
355 } 355 }
356 356
@@ -381,7 +381,7 @@ static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
381 if (dev->type == ARPHRD_ETHER) { 381 if (dev->type == ARPHRD_ETHER) {
382 if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) { 382 if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) {
383 dn_dn2eth(mac_addr, ifa->ifa_local); 383 dn_dn2eth(mac_addr, ifa->ifa_local);
384 dev_mc_add(dev, mac_addr, ETH_ALEN, 0); 384 dev_mc_add(dev, mac_addr);
385 } 385 }
386 } 386 }
387 387
@@ -1001,9 +1001,9 @@ static int dn_eth_up(struct net_device *dev)
1001 struct dn_dev *dn_db = dev->dn_ptr; 1001 struct dn_dev *dn_db = dev->dn_ptr;
1002 1002
1003 if (dn_db->parms.forwarding == 0) 1003 if (dn_db->parms.forwarding == 0)
1004 dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); 1004 dev_mc_add(dev, dn_rt_all_end_mcast);
1005 else 1005 else
1006 dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); 1006 dev_mc_add(dev, dn_rt_all_rt_mcast);
1007 1007
1008 dn_db->use_long = 1; 1008 dn_db->use_long = 1;
1009 1009
@@ -1015,9 +1015,9 @@ static void dn_eth_down(struct net_device *dev)
1015 struct dn_dev *dn_db = dev->dn_ptr; 1015 struct dn_dev *dn_db = dev->dn_ptr;
1016 1016
1017 if (dn_db->parms.forwarding == 0) 1017 if (dn_db->parms.forwarding == 0)
1018 dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0); 1018 dev_mc_del(dev, dn_rt_all_end_mcast);
1019 else 1019 else
1020 dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0); 1020 dev_mc_del(dev, dn_rt_all_rt_mcast);
1021} 1021}
1022 1022
1023static void dn_dev_set_timer(struct net_device *dev); 1023static void dn_dev_set_timer(struct net_device *dev);
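
The dev_mc_delete()/dev_mc_add() conversions above (and the dev_uc_* ones in net/dsa below) come from the net_device address-list rework: the explicit length and "global" arguments are gone, with the length now implicitly dev->addr_len. A sketch of the prototypes these callers assume:

    int dev_mc_add(struct net_device *dev, unsigned char *addr);
    int dev_mc_del(struct net_device *dev, unsigned char *addr);
    int dev_uc_add(struct net_device *dev, unsigned char *addr);
    int dev_uc_del(struct net_device *dev, unsigned char *addr);
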
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 7466c546f286..af28dcc21844 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -196,7 +196,6 @@ static int dn_fib_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
196{ 196{
197 struct dn_fib_rule *r = (struct dn_fib_rule *)rule; 197 struct dn_fib_rule *r = (struct dn_fib_rule *)rule;
198 198
199 frh->family = AF_DECnet;
200 frh->dst_len = r->dst_len; 199 frh->dst_len = r->dst_len;
201 frh->src_len = r->src_len; 200 frh->src_len = r->src_len;
202 frh->tos = 0; 201 frh->tos = 0;
@@ -212,30 +211,13 @@ nla_put_failure:
212 return -ENOBUFS; 211 return -ENOBUFS;
213} 212}
214 213
215static u32 dn_fib_rule_default_pref(struct fib_rules_ops *ops)
216{
217 struct list_head *pos;
218 struct fib_rule *rule;
219
220 if (!list_empty(&dn_fib_rules_ops->rules_list)) {
221 pos = dn_fib_rules_ops->rules_list.next;
222 if (pos->next != &dn_fib_rules_ops->rules_list) {
223 rule = list_entry(pos->next, struct fib_rule, list);
224 if (rule->pref)
225 return rule->pref - 1;
226 }
227 }
228
229 return 0;
230}
231
232static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops) 214static void dn_fib_rule_flush_cache(struct fib_rules_ops *ops)
233{ 215{
234 dn_rt_cache_flush(-1); 216 dn_rt_cache_flush(-1);
235} 217}
236 218
237static struct fib_rules_ops dn_fib_rules_ops_template = { 219static struct fib_rules_ops dn_fib_rules_ops_template = {
238 .family = AF_DECnet, 220 .family = FIB_RULES_DECNET,
239 .rule_size = sizeof(struct dn_fib_rule), 221 .rule_size = sizeof(struct dn_fib_rule),
240 .addr_size = sizeof(u16), 222 .addr_size = sizeof(u16),
241 .action = dn_fib_rule_action, 223 .action = dn_fib_rule_action,
@@ -243,7 +225,7 @@ static struct fib_rules_ops dn_fib_rules_ops_template = {
243 .configure = dn_fib_rule_configure, 225 .configure = dn_fib_rule_configure,
244 .compare = dn_fib_rule_compare, 226 .compare = dn_fib_rule_compare,
245 .fill = dn_fib_rule_fill, 227 .fill = dn_fib_rule_fill,
246 .default_pref = dn_fib_rule_default_pref, 228 .default_pref = fib_default_rule_pref,
247 .flush_cache = dn_fib_rule_flush_cache, 229 .flush_cache = dn_fib_rule_flush_cache,
248 .nlgroup = RTNLGRP_DECnet_RULE, 230 .nlgroup = RTNLGRP_DECnet_RULE,
249 .policy = dn_fib_rule_policy, 231 .policy = dn_fib_rule_policy,
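
dn_fib_rule_default_pref() above (and fib4_rule_default_pref() in net/ipv4 below) were byte-for-byte copies; both templates now point at a shared fib_default_rule_pref(). Its body matches what was removed — sketched from the deleted per-family copies:

    u32 fib_default_rule_pref(struct fib_rules_ops *ops)
    {
    	struct list_head *pos;
    	struct fib_rule *rule;

    	if (!list_empty(&ops->rules_list)) {
    		pos = ops->rules_list.next;
    		if (pos->next != &ops->rules_list) {
    			rule = list_entry(pos->next, struct fib_rule, list);
    			if (rule->pref)
    				return rule->pref - 1;	/* just below lowest */
    		}
    	}
    	return 0;
    }
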
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 2175e6d5cc8d..8fdca56bb08f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -67,7 +67,7 @@ static int dsa_slave_open(struct net_device *dev)
67 return -ENETDOWN; 67 return -ENETDOWN;
68 68
69 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) { 69 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) {
70 err = dev_unicast_add(master, dev->dev_addr); 70 err = dev_uc_add(master, dev->dev_addr);
71 if (err < 0) 71 if (err < 0)
72 goto out; 72 goto out;
73 } 73 }
@@ -90,7 +90,7 @@ clear_allmulti:
90 dev_set_allmulti(master, -1); 90 dev_set_allmulti(master, -1);
91del_unicast: 91del_unicast:
92 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 92 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
93 dev_unicast_delete(master, dev->dev_addr); 93 dev_uc_del(master, dev->dev_addr);
94out: 94out:
95 return err; 95 return err;
96} 96}
@@ -101,14 +101,14 @@ static int dsa_slave_close(struct net_device *dev)
101 struct net_device *master = p->parent->dst->master_netdev; 101 struct net_device *master = p->parent->dst->master_netdev;
102 102
103 dev_mc_unsync(master, dev); 103 dev_mc_unsync(master, dev);
104 dev_unicast_unsync(master, dev); 104 dev_uc_unsync(master, dev);
105 if (dev->flags & IFF_ALLMULTI) 105 if (dev->flags & IFF_ALLMULTI)
106 dev_set_allmulti(master, -1); 106 dev_set_allmulti(master, -1);
107 if (dev->flags & IFF_PROMISC) 107 if (dev->flags & IFF_PROMISC)
108 dev_set_promiscuity(master, -1); 108 dev_set_promiscuity(master, -1);
109 109
110 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 110 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
111 dev_unicast_delete(master, dev->dev_addr); 111 dev_uc_del(master, dev->dev_addr);
112 112
113 return 0; 113 return 0;
114} 114}
@@ -130,7 +130,7 @@ static void dsa_slave_set_rx_mode(struct net_device *dev)
130 struct net_device *master = p->parent->dst->master_netdev; 130 struct net_device *master = p->parent->dst->master_netdev;
131 131
132 dev_mc_sync(master, dev); 132 dev_mc_sync(master, dev);
133 dev_unicast_sync(master, dev); 133 dev_uc_sync(master, dev);
134} 134}
135 135
136static int dsa_slave_set_mac_address(struct net_device *dev, void *a) 136static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
@@ -147,13 +147,13 @@ static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
147 goto out; 147 goto out;
148 148
149 if (compare_ether_addr(addr->sa_data, master->dev_addr)) { 149 if (compare_ether_addr(addr->sa_data, master->dev_addr)) {
150 err = dev_unicast_add(master, addr->sa_data); 150 err = dev_uc_add(master, addr->sa_data);
151 if (err < 0) 151 if (err < 0)
152 return err; 152 return err;
153 } 153 }
154 154
155 if (compare_ether_addr(dev->dev_addr, master->dev_addr)) 155 if (compare_ether_addr(dev->dev_addr, master->dev_addr))
156 dev_unicast_delete(master, dev->dev_addr); 156 dev_uc_del(master, dev->dev_addr);
157 157
158out: 158out:
159 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); 159 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 205a1c12f3c0..35846964082c 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -136,7 +136,7 @@ int eth_rebuild_header(struct sk_buff *skb)
136 default: 136 default:
137 printk(KERN_DEBUG 137 printk(KERN_DEBUG
138 "%s: unable to resolve type %X addresses.\n", 138 "%s: unable to resolve type %X addresses.\n",
139 dev->name, (int)eth->h_proto); 139 dev->name, (__force int)eth->h_proto);
140 140
141 memcpy(eth->h_source, dev->dev_addr, ETH_ALEN); 141 memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
142 break; 142 break;
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index 0c94a1ac2946..8e3a1fd938ab 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -250,6 +250,20 @@ config IP_MROUTE
250 <file:Documentation/networking/multicast.txt>. If you haven't heard 250 <file:Documentation/networking/multicast.txt>. If you haven't heard
251 about it, you don't need it. 251 about it, you don't need it.
252 252
253config IP_MROUTE_MULTIPLE_TABLES
254 bool "IP: multicast policy routing"
255 depends on IP_MROUTE && IP_ADVANCED_ROUTER
256 select FIB_RULES
257 help
258 Normally, a multicast router runs a userspace daemon and decides
259 what to do with a multicast packet based on the source and
260 destination addresses. If you say Y here, the multicast router
261 will also be able to take interfaces and packet marks into
262 account and run multiple instances of userspace daemons
263 simultaneously, each one handling a single table.
264
265 If unsure, say N.
266
253config IP_PIMSM_V1 267config IP_PIMSM_V1
254 bool "IP: PIM-SM version 1 support" 268 bool "IP: PIM-SM version 1 support"
255 depends on IP_MROUTE 269 depends on IP_MROUTE
@@ -587,9 +601,15 @@ choice
587 config DEFAULT_HTCP 601 config DEFAULT_HTCP
588 bool "Htcp" if TCP_CONG_HTCP=y 602 bool "Htcp" if TCP_CONG_HTCP=y
589 603
604 config DEFAULT_HYBLA
605 bool "Hybla" if TCP_CONG_HYBLA=y
606
590 config DEFAULT_VEGAS 607 config DEFAULT_VEGAS
591 bool "Vegas" if TCP_CONG_VEGAS=y 608 bool "Vegas" if TCP_CONG_VEGAS=y
592 609
610 config DEFAULT_VENO
611 bool "Veno" if TCP_CONG_VENO=y
612
593 config DEFAULT_WESTWOOD 613 config DEFAULT_WESTWOOD
594 bool "Westwood" if TCP_CONG_WESTWOOD=y 614 bool "Westwood" if TCP_CONG_WESTWOOD=y
595 615
@@ -610,8 +630,10 @@ config DEFAULT_TCP_CONG
610 default "bic" if DEFAULT_BIC 630 default "bic" if DEFAULT_BIC
611 default "cubic" if DEFAULT_CUBIC 631 default "cubic" if DEFAULT_CUBIC
612 default "htcp" if DEFAULT_HTCP 632 default "htcp" if DEFAULT_HTCP
633 default "hybla" if DEFAULT_HYBLA
613 default "vegas" if DEFAULT_VEGAS 634 default "vegas" if DEFAULT_VEGAS
614 default "westwood" if DEFAULT_WESTWOOD 635 default "westwood" if DEFAULT_WESTWOOD
636 default "veno" if DEFAULT_VENO
615 default "reno" if DEFAULT_RENO 637 default "reno" if DEFAULT_RENO
616 default "cubic" 638 default "cubic"
617 639
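
The new DEFAULT_HYBLA and DEFAULT_VENO choices above only select the compiled-in boot default via DEFAULT_TCP_CONG; the active algorithm remains switchable at runtime, e.g. with "sysctl -w net.ipv4.tcp_congestion_control=hybla", as long as the corresponding TCP_CONG_* module is built.
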
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f71357422380..9f52880fae10 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -154,7 +154,7 @@ void inet_sock_destruct(struct sock *sk)
154 WARN_ON(sk->sk_forward_alloc); 154 WARN_ON(sk->sk_forward_alloc);
155 155
156 kfree(inet->opt); 156 kfree(inet->opt);
157 dst_release(sk->sk_dst_cache); 157 dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
158 sk_refcnt_debug_dec(sk); 158 sk_refcnt_debug_dec(sk);
159} 159}
160EXPORT_SYMBOL(inet_sock_destruct); 160EXPORT_SYMBOL(inet_sock_destruct);
@@ -419,6 +419,8 @@ int inet_release(struct socket *sock)
419 if (sk) { 419 if (sk) {
420 long timeout; 420 long timeout;
421 421
422 inet_rps_reset_flow(sk);
423
422 /* Applications forget to leave groups before exiting */ 424 /* Applications forget to leave groups before exiting */
423 ip_mc_drop_socket(sk); 425 ip_mc_drop_socket(sk);
424 426
@@ -546,7 +548,7 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
546{ 548{
547 DEFINE_WAIT(wait); 549 DEFINE_WAIT(wait);
548 550
549 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 551 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
550 552
551 /* Basic assumption: if someone sets sk->sk_err, he _must_ 553 /* Basic assumption: if someone sets sk->sk_err, he _must_
552 * change state of the socket from TCP_SYN_*. 554 * change state of the socket from TCP_SYN_*.
@@ -559,9 +561,9 @@ static long inet_wait_for_connect(struct sock *sk, long timeo)
559 lock_sock(sk); 561 lock_sock(sk);
560 if (signal_pending(current) || !timeo) 562 if (signal_pending(current) || !timeo)
561 break; 563 break;
562 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 564 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
563 } 565 }
564 finish_wait(sk->sk_sleep, &wait); 566 finish_wait(sk_sleep(sk), &wait);
565 return timeo; 567 return timeo;
566} 568}
567 569
@@ -720,6 +722,8 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
720{ 722{
721 struct sock *sk = sock->sk; 723 struct sock *sk = sock->sk;
722 724
725 inet_rps_record_flow(sk);
726
723 /* We may need to bind the socket. */ 727 /* We may need to bind the socket. */
724 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) 728 if (!inet_sk(sk)->inet_num && inet_autobind(sk))
725 return -EAGAIN; 729 return -EAGAIN;
@@ -728,12 +732,13 @@ int inet_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
728} 732}
729EXPORT_SYMBOL(inet_sendmsg); 733EXPORT_SYMBOL(inet_sendmsg);
730 734
731
732static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset, 735static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
733 size_t size, int flags) 736 size_t size, int flags)
734{ 737{
735 struct sock *sk = sock->sk; 738 struct sock *sk = sock->sk;
736 739
740 inet_rps_record_flow(sk);
741
737 /* We may need to bind the socket. */ 742 /* We may need to bind the socket. */
738 if (!inet_sk(sk)->inet_num && inet_autobind(sk)) 743 if (!inet_sk(sk)->inet_num && inet_autobind(sk))
739 return -EAGAIN; 744 return -EAGAIN;
@@ -743,6 +748,22 @@ static ssize_t inet_sendpage(struct socket *sock, struct page *page, int offset,
743 return sock_no_sendpage(sock, page, offset, size, flags); 748 return sock_no_sendpage(sock, page, offset, size, flags);
744} 749}
745 750
751int inet_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
752 size_t size, int flags)
753{
754 struct sock *sk = sock->sk;
755 int addr_len = 0;
756 int err;
757
758 inet_rps_record_flow(sk);
759
760 err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
761 flags & ~MSG_DONTWAIT, &addr_len);
762 if (err >= 0)
763 msg->msg_namelen = addr_len;
764 return err;
765}
766EXPORT_SYMBOL(inet_recvmsg);
746 767
747int inet_shutdown(struct socket *sock, int how) 768int inet_shutdown(struct socket *sock, int how)
748{ 769{
@@ -872,7 +893,7 @@ const struct proto_ops inet_stream_ops = {
872 .setsockopt = sock_common_setsockopt, 893 .setsockopt = sock_common_setsockopt,
873 .getsockopt = sock_common_getsockopt, 894 .getsockopt = sock_common_getsockopt,
874 .sendmsg = tcp_sendmsg, 895 .sendmsg = tcp_sendmsg,
875 .recvmsg = sock_common_recvmsg, 896 .recvmsg = inet_recvmsg,
876 .mmap = sock_no_mmap, 897 .mmap = sock_no_mmap,
877 .sendpage = tcp_sendpage, 898 .sendpage = tcp_sendpage,
878 .splice_read = tcp_splice_read, 899 .splice_read = tcp_splice_read,
@@ -899,7 +920,7 @@ const struct proto_ops inet_dgram_ops = {
899 .setsockopt = sock_common_setsockopt, 920 .setsockopt = sock_common_setsockopt,
900 .getsockopt = sock_common_getsockopt, 921 .getsockopt = sock_common_getsockopt,
901 .sendmsg = inet_sendmsg, 922 .sendmsg = inet_sendmsg,
902 .recvmsg = sock_common_recvmsg, 923 .recvmsg = inet_recvmsg,
903 .mmap = sock_no_mmap, 924 .mmap = sock_no_mmap,
904 .sendpage = inet_sendpage, 925 .sendpage = inet_sendpage,
905#ifdef CONFIG_COMPAT 926#ifdef CONFIG_COMPAT
@@ -929,7 +950,7 @@ static const struct proto_ops inet_sockraw_ops = {
929 .setsockopt = sock_common_setsockopt, 950 .setsockopt = sock_common_setsockopt,
930 .getsockopt = sock_common_getsockopt, 951 .getsockopt = sock_common_getsockopt,
931 .sendmsg = inet_sendmsg, 952 .sendmsg = inet_sendmsg,
932 .recvmsg = sock_common_recvmsg, 953 .recvmsg = inet_recvmsg,
933 .mmap = sock_no_mmap, 954 .mmap = sock_no_mmap,
934 .sendpage = inet_sendpage, 955 .sendpage = inet_sendpage,
935#ifdef CONFIG_COMPAT 956#ifdef CONFIG_COMPAT
@@ -1302,8 +1323,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1302 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl))) 1323 if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
1303 goto out_unlock; 1324 goto out_unlock;
1304 1325
1305 id = ntohl(*(u32 *)&iph->id); 1326 id = ntohl(*(__be32 *)&iph->id);
1306 flush = (u16)((ntohl(*(u32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF)); 1327 flush = (u16)((ntohl(*(__be32 *)iph) ^ skb_gro_len(skb)) | (id ^ IP_DF));
1307 id >>= 16; 1328 id >>= 16;
1308 1329
1309 for (p = *head; p; p = p->next) { 1330 for (p = *head; p; p = p->next) {
@@ -1316,8 +1337,8 @@ static struct sk_buff **inet_gro_receive(struct sk_buff **head,
1316 1337
1317 if ((iph->protocol ^ iph2->protocol) | 1338 if ((iph->protocol ^ iph2->protocol) |
1318 (iph->tos ^ iph2->tos) | 1339 (iph->tos ^ iph2->tos) |
1319 (iph->saddr ^ iph2->saddr) | 1340 ((__force u32)iph->saddr ^ (__force u32)iph2->saddr) |
1320 (iph->daddr ^ iph2->daddr)) { 1341 ((__force u32)iph->daddr ^ (__force u32)iph2->daddr)) {
1321 NAPI_GRO_CB(p)->same_flow = 0; 1342 NAPI_GRO_CB(p)->same_flow = 0;
1322 continue; 1343 continue;
1323 } 1344 }
@@ -1407,10 +1428,10 @@ EXPORT_SYMBOL_GPL(snmp_fold_field);
1407int snmp_mib_init(void __percpu *ptr[2], size_t mibsize) 1428int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
1408{ 1429{
1409 BUG_ON(ptr == NULL); 1430 BUG_ON(ptr == NULL);
1410 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); 1431 ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long));
1411 if (!ptr[0]) 1432 if (!ptr[0])
1412 goto err0; 1433 goto err0;
1413 ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long)); 1434 ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long));
1414 if (!ptr[1]) 1435 if (!ptr[1])
1415 goto err1; 1436 goto err1;
1416 return 0; 1437 return 0;
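
The inet_rps_record_flow() calls wired into inet_release(), inet_sendmsg(), inet_sendpage() and the new inet_recvmsg() wrapper above are the socket-side half of Receive Flow Steering: each call notes which CPU the application last touched the flow on, in the table installed by the rps_sock_flow_entries handler earlier in this patch. A sketch of what the hook boils down to, assuming it is a thin wrapper over the flow table:

    static inline void inet_rps_record_flow(const struct sock *sk)
    {
    	struct rps_sock_flow_table *tbl;

    	rcu_read_lock();
    	tbl = rcu_dereference(rps_sock_flow_table);
    	if (tbl && inet_sk(sk)->rxhash)
    		/* remember the CPU currently consuming this flow */
    		tbl->ents[inet_sk(sk)->rxhash & tbl->mask] =
    					raw_smp_processor_id();
    	rcu_read_unlock();
    }
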
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 90e3d6379a42..382bc768ed56 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1096,10 +1096,10 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
1096 case NETDEV_DOWN: 1096 case NETDEV_DOWN:
1097 ip_mc_down(in_dev); 1097 ip_mc_down(in_dev);
1098 break; 1098 break;
1099 case NETDEV_BONDING_OLDTYPE: 1099 case NETDEV_PRE_TYPE_CHANGE:
1100 ip_mc_unmap(in_dev); 1100 ip_mc_unmap(in_dev);
1101 break; 1101 break;
1102 case NETDEV_BONDING_NEWTYPE: 1102 case NETDEV_POST_TYPE_CHANGE:
1103 ip_mc_remap(in_dev); 1103 ip_mc_remap(in_dev);
1104 break; 1104 break;
1105 case NETDEV_CHANGEMTU: 1105 case NETDEV_CHANGEMTU:
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index ca2d07b1c706..3ec84fea5b71 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -213,7 +213,6 @@ static int fib4_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
213{ 213{
214 struct fib4_rule *rule4 = (struct fib4_rule *) rule; 214 struct fib4_rule *rule4 = (struct fib4_rule *) rule;
215 215
216 frh->family = AF_INET;
217 frh->dst_len = rule4->dst_len; 216 frh->dst_len = rule4->dst_len;
218 frh->src_len = rule4->src_len; 217 frh->src_len = rule4->src_len;
219 frh->tos = rule4->tos; 218 frh->tos = rule4->tos;
@@ -234,23 +233,6 @@ nla_put_failure:
234 return -ENOBUFS; 233 return -ENOBUFS;
235} 234}
236 235
237static u32 fib4_rule_default_pref(struct fib_rules_ops *ops)
238{
239 struct list_head *pos;
240 struct fib_rule *rule;
241
242 if (!list_empty(&ops->rules_list)) {
243 pos = ops->rules_list.next;
244 if (pos->next != &ops->rules_list) {
245 rule = list_entry(pos->next, struct fib_rule, list);
246 if (rule->pref)
247 return rule->pref - 1;
248 }
249 }
250
251 return 0;
252}
253
254static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule) 236static size_t fib4_rule_nlmsg_payload(struct fib_rule *rule)
255{ 237{
256 return nla_total_size(4) /* dst */ 238 return nla_total_size(4) /* dst */
@@ -264,7 +246,7 @@ static void fib4_rule_flush_cache(struct fib_rules_ops *ops)
264} 246}
265 247
266static struct fib_rules_ops fib4_rules_ops_template = { 248static struct fib_rules_ops fib4_rules_ops_template = {
267 .family = AF_INET, 249 .family = FIB_RULES_IPV4,
268 .rule_size = sizeof(struct fib4_rule), 250 .rule_size = sizeof(struct fib4_rule),
269 .addr_size = sizeof(u32), 251 .addr_size = sizeof(u32),
270 .action = fib4_rule_action, 252 .action = fib4_rule_action,
@@ -272,7 +254,7 @@ static struct fib_rules_ops fib4_rules_ops_template = {
272 .configure = fib4_rule_configure, 254 .configure = fib4_rule_configure,
273 .compare = fib4_rule_compare, 255 .compare = fib4_rule_compare,
274 .fill = fib4_rule_fill, 256 .fill = fib4_rule_fill,
275 .default_pref = fib4_rule_default_pref, 257 .default_pref = fib_default_rule_pref,
276 .nlmsg_payload = fib4_rule_nlmsg_payload, 258 .nlmsg_payload = fib4_rule_nlmsg_payload,
277 .flush_cache = fib4_rule_flush_cache, 259 .flush_cache = fib4_rule_flush_cache,
278 .nlgroup = RTNLGRP_IPV4_RULE, 260 .nlgroup = RTNLGRP_IPV4_RULE,
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index ac4dec132735..f3d339f728b0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -331,9 +331,10 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
331 if (ip_append_data(sk, icmp_glue_bits, icmp_param, 331 if (ip_append_data(sk, icmp_glue_bits, icmp_param,
332 icmp_param->data_len+icmp_param->head_len, 332 icmp_param->data_len+icmp_param->head_len,
333 icmp_param->head_len, 333 icmp_param->head_len,
334 ipc, rt, MSG_DONTWAIT) < 0) 334 ipc, rt, MSG_DONTWAIT) < 0) {
335 ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
335 ip_flush_pending_frames(sk); 336 ip_flush_pending_frames(sk);
336 else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { 337 } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
337 struct icmphdr *icmph = icmp_hdr(skb); 338 struct icmphdr *icmph = icmp_hdr(skb);
338 __wsum csum = 0; 339 __wsum csum = 0;
339 struct sk_buff *skb1; 340 struct sk_buff *skb1;
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 15d3eeda92f5..5fff865a4fa7 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -998,7 +998,7 @@ static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
998 --ANK 998 --ANK
999 */ 999 */
1000 if (arp_mc_map(addr, buf, dev, 0) == 0) 1000 if (arp_mc_map(addr, buf, dev, 0) == 0)
1001 dev_mc_add(dev, buf, dev->addr_len, 0); 1001 dev_mc_add(dev, buf);
1002} 1002}
1003 1003
1004/* 1004/*
@@ -1011,7 +1011,7 @@ static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
1011 struct net_device *dev = in_dev->dev; 1011 struct net_device *dev = in_dev->dev;
1012 1012
1013 if (arp_mc_map(addr, buf, dev, 0) == 0) 1013 if (arp_mc_map(addr, buf, dev, 0) == 0)
1014 dev_mc_delete(dev, buf, dev->addr_len, 0); 1014 dev_mc_del(dev, buf);
1015} 1015}
1016 1016
1017#ifdef CONFIG_IP_MULTICAST 1017#ifdef CONFIG_IP_MULTICAST
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 8da6429269dd..e0a3e3537b14 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -234,7 +234,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
234 * having to remove and re-insert us on the wait queue. 234 * having to remove and re-insert us on the wait queue.
235 */ 235 */
236 for (;;) { 236 for (;;) {
237 prepare_to_wait_exclusive(sk->sk_sleep, &wait, 237 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
238 TASK_INTERRUPTIBLE); 238 TASK_INTERRUPTIBLE);
239 release_sock(sk); 239 release_sock(sk);
240 if (reqsk_queue_empty(&icsk->icsk_accept_queue)) 240 if (reqsk_queue_empty(&icsk->icsk_accept_queue))
@@ -253,7 +253,7 @@ static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
253 if (!timeo) 253 if (!timeo)
254 break; 254 break;
255 } 255 }
256 finish_wait(sk->sk_sleep, &wait); 256 finish_wait(sk_sleep(sk), &wait);
257 return err; 257 return err;
258} 258}
259 259
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index d1bcc9f21d4f..f0392191740b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -311,7 +311,7 @@ int ip_output(struct sk_buff *skb)
311 !(IPCB(skb)->flags & IPSKB_REROUTED)); 311 !(IPCB(skb)->flags & IPSKB_REROUTED));
312} 312}
313 313
314int ip_queue_xmit(struct sk_buff *skb, int ipfragok) 314int ip_queue_xmit(struct sk_buff *skb)
315{ 315{
316 struct sock *sk = skb->sk; 316 struct sock *sk = skb->sk;
317 struct inet_sock *inet = inet_sk(sk); 317 struct inet_sock *inet = inet_sk(sk);
@@ -370,7 +370,7 @@ packet_routed:
370 skb_reset_network_header(skb); 370 skb_reset_network_header(skb);
371 iph = ip_hdr(skb); 371 iph = ip_hdr(skb);
372 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)); 372 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
373 if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok) 373 if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
374 iph->frag_off = htons(IP_DF); 374 iph->frag_off = htons(IP_DF);
375 else 375 else
376 iph->frag_off = 0; 376 iph->frag_off = 0;
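
ip_queue_xmit() above loses its ipfragok parameter; the per-packet "may fragment" decision now rides on the skb itself. Callers that previously passed a non-zero flag instead mark the buffer before transmit — a sketch of the pattern:

    skb->local_df = 1;		/* allow this packet to be fragmented */
    err = ip_queue_xmit(skb);	/* the per-call flag argument is gone */
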
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 1e64dabbd232..b0aa0546a3b3 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -287,12 +287,8 @@ int ip_ra_control(struct sock *sk, unsigned char on,
287void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, 287void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err,
288 __be16 port, u32 info, u8 *payload) 288 __be16 port, u32 info, u8 *payload)
289{ 289{
290 struct inet_sock *inet = inet_sk(sk);
291 struct sock_exterr_skb *serr; 290 struct sock_exterr_skb *serr;
292 291
293 if (!inet->recverr)
294 return;
295
296 skb = skb_clone(skb, GFP_ATOMIC); 292 skb = skb_clone(skb, GFP_ATOMIC);
297 if (!skb) 293 if (!skb)
298 return; 294 return;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 067ce9e043dc..b9d84e800cf4 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -976,7 +976,7 @@ static int __init ic_bootp_recv(struct sk_buff *skb, struct net_device *dev, str
976 /* Is it a reply for the device we are configuring? */ 976 /* Is it a reply for the device we are configuring? */
977 if (b->xid != ic_dev_xid) { 977 if (b->xid != ic_dev_xid) {
978 if (net_ratelimit()) 978 if (net_ratelimit())
979 printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet \n"); 979 printk(KERN_ERR "DHCP/BOOTP: Ignoring delayed packet\n");
980 goto drop_unlock; 980 goto drop_unlock;
981 } 981 }
982 982
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 9d4f6d1340a4..a2df5012a1d0 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -63,11 +63,40 @@
63#include <net/ipip.h> 63#include <net/ipip.h>
64#include <net/checksum.h> 64#include <net/checksum.h>
65#include <net/netlink.h> 65#include <net/netlink.h>
66#include <net/fib_rules.h>
66 67
67#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2) 68#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
68#define CONFIG_IP_PIMSM 1 69#define CONFIG_IP_PIMSM 1
69#endif 70#endif
70 71
72struct mr_table {
73 struct list_head list;
74#ifdef CONFIG_NET_NS
75 struct net *net;
76#endif
77 u32 id;
78 struct sock *mroute_sk;
79 struct timer_list ipmr_expire_timer;
80 struct list_head mfc_unres_queue;
81 struct list_head mfc_cache_array[MFC_LINES];
82 struct vif_device vif_table[MAXVIFS];
83 int maxvif;
84 atomic_t cache_resolve_queue_len;
85 int mroute_do_assert;
86 int mroute_do_pim;
87#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
88 int mroute_reg_vif_num;
89#endif
90};
91
92struct ipmr_rule {
93 struct fib_rule common;
94};
95
96struct ipmr_result {
97 struct mr_table *mrt;
98};
99
71/* Big lock, protecting vif table, mrt cache and mroute socket state. 100/* Big lock, protecting vif table, mrt cache and mroute socket state.
72 Note that the changes are semaphored via rtnl_lock. 101 Note that the changes are semaphored via rtnl_lock.
73 */ 102 */
@@ -78,9 +107,7 @@ static DEFINE_RWLOCK(mrt_lock);
78 * Multicast router control variables 107 * Multicast router control variables
79 */ 108 */
80 109
81#define VIF_EXISTS(_net, _idx) ((_net)->ipv4.vif_table[_idx].dev != NULL) 110#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
82
83static struct mfc_cache *mfc_unres_queue; /* Queue of unresolved entries */
84 111
85/* Special spinlock for queue of unresolved entries */ 112/* Special spinlock for queue of unresolved entries */
86static DEFINE_SPINLOCK(mfc_unres_lock); 113static DEFINE_SPINLOCK(mfc_unres_lock);
@@ -95,12 +122,215 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
95 122
 static struct kmem_cache *mrt_cachep __read_mostly;
 
-static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
-static int ipmr_cache_report(struct net *net,
+static struct mr_table *ipmr_new_table(struct net *net, u32 id);
+static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+			 struct sk_buff *skb, struct mfc_cache *cache,
+			 int local);
+static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert);
-static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);
+static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
+			    struct mfc_cache *c, struct rtmsg *rtm);
+static void ipmr_expire_process(unsigned long arg);
+
+#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+#define ipmr_for_each_table(mrt, net) \
+	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)
+
+static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+{
+	struct mr_table *mrt;
+
+	ipmr_for_each_table(mrt, net) {
+		if (mrt->id == id)
+			return mrt;
+	}
+	return NULL;
+}
+
+static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
+			   struct mr_table **mrt)
+{
+	struct ipmr_result res;
+	struct fib_lookup_arg arg = { .result = &res, };
+	int err;
+
+	err = fib_rules_lookup(net->ipv4.mr_rules_ops, flp, 0, &arg);
+	if (err < 0)
+		return err;
+	*mrt = res.mrt;
+	return 0;
+}
+
+static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp,
+			    int flags, struct fib_lookup_arg *arg)
+{
+	struct ipmr_result *res = arg->result;
+	struct mr_table *mrt;
+
+	switch (rule->action) {
+	case FR_ACT_TO_TBL:
+		break;
+	case FR_ACT_UNREACHABLE:
+		return -ENETUNREACH;
+	case FR_ACT_PROHIBIT:
+		return -EACCES;
+	case FR_ACT_BLACKHOLE:
+	default:
+		return -EINVAL;
+	}
+
+	mrt = ipmr_get_table(rule->fr_net, rule->table);
+	if (mrt == NULL)
+		return -EAGAIN;
+	res->mrt = mrt;
+	return 0;
+}
+
+static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags)
+{
+	return 1;
+}
+
+static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = {
+	FRA_GENERIC_POLICY,
+};
+
+static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb,
+			       struct fib_rule_hdr *frh, struct nlattr **tb)
+{
+	return 0;
+}
+
+static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh,
+			     struct nlattr **tb)
+{
+	return 1;
+}
+
+static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
+			  struct fib_rule_hdr *frh)
+{
+	frh->dst_len = 0;
+	frh->src_len = 0;
+	frh->tos = 0;
+	return 0;
+}
+
+static struct fib_rules_ops ipmr_rules_ops_template = {
+	.family = FIB_RULES_IPMR,
+	.rule_size = sizeof(struct ipmr_rule),
+	.addr_size = sizeof(u32),
+	.action = ipmr_rule_action,
+	.match = ipmr_rule_match,
+	.configure = ipmr_rule_configure,
+	.compare = ipmr_rule_compare,
+	.default_pref = fib_default_rule_pref,
+	.fill = ipmr_rule_fill,
+	.nlgroup = RTNLGRP_IPV4_RULE,
+	.policy = ipmr_rule_policy,
+	.owner = THIS_MODULE,
+};
+
+static int __net_init ipmr_rules_init(struct net *net)
+{
+	struct fib_rules_ops *ops;
+	struct mr_table *mrt;
+	int err;
+
+	ops = fib_rules_register(&ipmr_rules_ops_template, net);
+	if (IS_ERR(ops))
+		return PTR_ERR(ops);
+
+	INIT_LIST_HEAD(&net->ipv4.mr_tables);
+
+	mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
+	if (mrt == NULL) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0);
+	if (err < 0)
+		goto err2;
+
+	net->ipv4.mr_rules_ops = ops;
+	return 0;
+
+err2:
+	kfree(mrt);
+err1:
+	fib_rules_unregister(ops);
+	return err;
+}
 
-static struct timer_list ipmr_expire_timer;
+static void __net_exit ipmr_rules_exit(struct net *net)
+{
+	struct mr_table *mrt, *next;
+
+	list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list)
+		kfree(mrt);
+	fib_rules_unregister(net->ipv4.mr_rules_ops);
+}
+#else
+#define ipmr_for_each_table(mrt, net) \
+	for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
+
+static struct mr_table *ipmr_get_table(struct net *net, u32 id)
+{
+	return net->ipv4.mrt;
+}
+
+static int ipmr_fib_lookup(struct net *net, struct flowi *flp,
+			   struct mr_table **mrt)
+{
+	*mrt = net->ipv4.mrt;
+	return 0;
+}
+
+static int __net_init ipmr_rules_init(struct net *net)
+{
+	net->ipv4.mrt = ipmr_new_table(net, RT_TABLE_DEFAULT);
+	return net->ipv4.mrt ? 0 : -ENOMEM;
+}
+
+static void __net_exit ipmr_rules_exit(struct net *net)
+{
+	kfree(net->ipv4.mrt);
+}
+#endif
+
+static struct mr_table *ipmr_new_table(struct net *net, u32 id)
+{
+	struct mr_table *mrt;
+	unsigned int i;
+
+	mrt = ipmr_get_table(net, id);
+	if (mrt != NULL)
+		return mrt;
+
+	mrt = kzalloc(sizeof(*mrt), GFP_KERNEL);
+	if (mrt == NULL)
+		return NULL;
+	write_pnet(&mrt->net, net);
+	mrt->id = id;
+
+	/* Forwarding cache */
+	for (i = 0; i < MFC_LINES; i++)
+		INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
+
+	INIT_LIST_HEAD(&mrt->mfc_unres_queue);
+
+	setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
+		    (unsigned long)mrt);
+
+#ifdef CONFIG_IP_PIMSM
+	mrt->mroute_reg_vif_num = -1;
+#endif
+#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+	list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables);
+#endif
+	return mrt;
+}
 
 /* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */
 
@@ -201,12 +431,22 @@ failure:
 static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct net *net = dev_net(dev);
+	struct mr_table *mrt;
+	struct flowi fl = {
+		.oif = dev->ifindex,
+		.iif = skb->skb_iif,
+		.mark = skb->mark,
+	};
+	int err;
+
+	err = ipmr_fib_lookup(net, &fl, &mrt);
+	if (err < 0)
+		return err;
 
 	read_lock(&mrt_lock);
 	dev->stats.tx_bytes += skb->len;
 	dev->stats.tx_packets++;
-	ipmr_cache_report(net, skb, net->ipv4.mroute_reg_vif_num,
-			  IGMPMSG_WHOLEPKT);
+	ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT);
 	read_unlock(&mrt_lock);
 	kfree_skb(skb);
 	return NETDEV_TX_OK;
@@ -226,12 +466,18 @@ static void reg_vif_setup(struct net_device *dev)
 	dev->features |= NETIF_F_NETNS_LOCAL;
 }
 
-static struct net_device *ipmr_reg_vif(struct net *net)
+static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
 {
 	struct net_device *dev;
 	struct in_device *in_dev;
+	char name[IFNAMSIZ];
 
-	dev = alloc_netdev(0, "pimreg", reg_vif_setup);
+	if (mrt->id == RT_TABLE_DEFAULT)
+		sprintf(name, "pimreg");
+	else
+		sprintf(name, "pimreg%u", mrt->id);
+
+	dev = alloc_netdev(0, name, reg_vif_setup);
 
 	if (dev == NULL)
 		return NULL;
@@ -276,17 +522,17 @@ failure:
  * @notify: Set to 1, if the caller is a notifier_call
  */
 
-static int vif_delete(struct net *net, int vifi, int notify,
+static int vif_delete(struct mr_table *mrt, int vifi, int notify,
 		      struct list_head *head)
 {
 	struct vif_device *v;
 	struct net_device *dev;
 	struct in_device *in_dev;
 
-	if (vifi < 0 || vifi >= net->ipv4.maxvif)
+	if (vifi < 0 || vifi >= mrt->maxvif)
 		return -EADDRNOTAVAIL;
 
-	v = &net->ipv4.vif_table[vifi];
+	v = &mrt->vif_table[vifi];
 
 	write_lock_bh(&mrt_lock);
 	dev = v->dev;
@@ -298,17 +544,17 @@ static int vif_delete(struct net *net, int vifi, int notify,
 	}
 
 #ifdef CONFIG_IP_PIMSM
-	if (vifi == net->ipv4.mroute_reg_vif_num)
-		net->ipv4.mroute_reg_vif_num = -1;
+	if (vifi == mrt->mroute_reg_vif_num)
+		mrt->mroute_reg_vif_num = -1;
 #endif
 
-	if (vifi+1 == net->ipv4.maxvif) {
+	if (vifi+1 == mrt->maxvif) {
 		int tmp;
 		for (tmp=vifi-1; tmp>=0; tmp--) {
-			if (VIF_EXISTS(net, tmp))
+			if (VIF_EXISTS(mrt, tmp))
 				break;
 		}
-		net->ipv4.maxvif = tmp+1;
+		mrt->maxvif = tmp+1;
 	}
 
 	write_unlock_bh(&mrt_lock);
@@ -329,7 +575,6 @@ static int vif_delete(struct net *net, int vifi, int notify,
 
 static inline void ipmr_cache_free(struct mfc_cache *c)
 {
-	release_net(mfc_net(c));
 	kmem_cache_free(mrt_cachep, c);
 }
 
@@ -337,13 +582,13 @@ static inline void ipmr_cache_free(struct mfc_cache *c)
    and reporting error to netlink readers.
  */
 
-static void ipmr_destroy_unres(struct mfc_cache *c)
+static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c)
 {
+	struct net *net = read_pnet(&mrt->net);
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
-	struct net *net = mfc_net(c);
 
-	atomic_dec(&net->ipv4.cache_resolve_queue_len);
+	atomic_dec(&mrt->cache_resolve_queue_len);
 
 	while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) {
 		if (ip_hdr(skb)->version == 0) {
@@ -364,42 +609,40 @@ static void ipmr_destroy_unres(struct mfc_cache *c)
 }
 
 
-/* Single timer process for all the unresolved queue. */
+/* Timer process for the unresolved queue. */
 
-static void ipmr_expire_process(unsigned long dummy)
+static void ipmr_expire_process(unsigned long arg)
 {
+	struct mr_table *mrt = (struct mr_table *)arg;
 	unsigned long now;
 	unsigned long expires;
-	struct mfc_cache *c, **cp;
+	struct mfc_cache *c, *next;
 
 	if (!spin_trylock(&mfc_unres_lock)) {
-		mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
+		mod_timer(&mrt->ipmr_expire_timer, jiffies+HZ/10);
 		return;
 	}
 
-	if (mfc_unres_queue == NULL)
+	if (list_empty(&mrt->mfc_unres_queue))
 		goto out;
 
 	now = jiffies;
 	expires = 10*HZ;
-	cp = &mfc_unres_queue;
 
-	while ((c=*cp) != NULL) {
+	list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
 		if (time_after(c->mfc_un.unres.expires, now)) {
 			unsigned long interval = c->mfc_un.unres.expires - now;
 			if (interval < expires)
 				expires = interval;
-			cp = &c->next;
 			continue;
 		}
 
-		*cp = c->next;
-
-		ipmr_destroy_unres(c);
+		list_del(&c->list);
+		ipmr_destroy_unres(mrt, c);
 	}
 
-	if (mfc_unres_queue != NULL)
-		mod_timer(&ipmr_expire_timer, jiffies + expires);
+	if (!list_empty(&mrt->mfc_unres_queue))
+		mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
 
 out:
 	spin_unlock(&mfc_unres_lock);
@@ -407,17 +650,17 @@ out:
 
 /* Fill oifs list. It is called under write locked mrt_lock. */
 
-static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
+static void ipmr_update_thresholds(struct mr_table *mrt, struct mfc_cache *cache,
+				   unsigned char *ttls)
 {
 	int vifi;
-	struct net *net = mfc_net(cache);
 
 	cache->mfc_un.res.minvif = MAXVIFS;
 	cache->mfc_un.res.maxvif = 0;
 	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);
 
-	for (vifi = 0; vifi < net->ipv4.maxvif; vifi++) {
-		if (VIF_EXISTS(net, vifi) &&
+	for (vifi = 0; vifi < mrt->maxvif; vifi++) {
+		if (VIF_EXISTS(mrt, vifi) &&
 		    ttls[vifi] && ttls[vifi] < 255) {
 			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
 			if (cache->mfc_un.res.minvif > vifi)
@@ -428,16 +671,17 @@ static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
 	}
 }
 
-static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
+static int vif_add(struct net *net, struct mr_table *mrt,
+		   struct vifctl *vifc, int mrtsock)
 {
 	int vifi = vifc->vifc_vifi;
-	struct vif_device *v = &net->ipv4.vif_table[vifi];
+	struct vif_device *v = &mrt->vif_table[vifi];
 	struct net_device *dev;
 	struct in_device *in_dev;
 	int err;
 
 	/* Is vif busy ? */
-	if (VIF_EXISTS(net, vifi))
+	if (VIF_EXISTS(mrt, vifi))
 		return -EADDRINUSE;
 
 	switch (vifc->vifc_flags) {
@@ -447,9 +691,9 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
 		 * Special Purpose VIF in PIM
 		 * All the packets will be sent to the daemon
 		 */
-		if (net->ipv4.mroute_reg_vif_num >= 0)
+		if (mrt->mroute_reg_vif_num >= 0)
 			return -EADDRINUSE;
-		dev = ipmr_reg_vif(net);
+		dev = ipmr_reg_vif(net, mrt);
 		if (!dev)
 			return -ENOBUFS;
 		err = dev_set_allmulti(dev, 1);
@@ -525,49 +769,47 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
 	v->dev = dev;
 #ifdef CONFIG_IP_PIMSM
 	if (v->flags&VIFF_REGISTER)
-		net->ipv4.mroute_reg_vif_num = vifi;
+		mrt->mroute_reg_vif_num = vifi;
 #endif
-	if (vifi+1 > net->ipv4.maxvif)
-		net->ipv4.maxvif = vifi+1;
+	if (vifi+1 > mrt->maxvif)
+		mrt->maxvif = vifi+1;
 	write_unlock_bh(&mrt_lock);
 	return 0;
 }
 
-static struct mfc_cache *ipmr_cache_find(struct net *net,
+static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
 					 __be32 origin,
 					 __be32 mcastgrp)
 {
 	int line = MFC_HASH(mcastgrp, origin);
 	struct mfc_cache *c;
 
-	for (c = net->ipv4.mfc_cache_array[line]; c; c = c->next) {
-		if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
-			break;
+	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
+		if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
+			return c;
 	}
-	return c;
+	return NULL;
 }
 
 /*
  *	Allocate a multicast cache entry
  */
-static struct mfc_cache *ipmr_cache_alloc(struct net *net)
+static struct mfc_cache *ipmr_cache_alloc(void)
 {
 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
 	if (c == NULL)
 		return NULL;
 	c->mfc_un.res.minvif = MAXVIFS;
-	mfc_net_set(c, net);
 	return c;
 }
 
-static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
+static struct mfc_cache *ipmr_cache_alloc_unres(void)
 {
 	struct mfc_cache *c = kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
 	if (c == NULL)
 		return NULL;
 	skb_queue_head_init(&c->mfc_un.unres.unresolved);
 	c->mfc_un.unres.expires = jiffies + 10*HZ;
-	mfc_net_set(c, net);
 	return c;
 }
 
@@ -575,7 +817,8 @@ static struct mfc_cache *ipmr_cache_alloc_unres(struct net *net)
 *	A cache entry has gone into a resolved state from queued
 */
 
-static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
+static void ipmr_cache_resolve(struct net *net, struct mr_table *mrt,
+			       struct mfc_cache *uc, struct mfc_cache *c)
 {
 	struct sk_buff *skb;
 	struct nlmsgerr *e;
@@ -588,7 +831,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 		if (ip_hdr(skb)->version == 0) {
 			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
 
-			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
+			if (ipmr_fill_mroute(mrt, skb, c, NLMSG_DATA(nlh)) > 0) {
 				nlh->nlmsg_len = (skb_tail_pointer(skb) -
 						  (u8 *)nlh);
 			} else {
@@ -600,9 +843,9 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 				memset(&e->msg, 0, sizeof(e->msg));
 			}
 
-			rtnl_unicast(skb, mfc_net(c), NETLINK_CB(skb).pid);
+			rtnl_unicast(skb, net, NETLINK_CB(skb).pid);
 		} else
-			ip_mr_forward(skb, c, 0);
+			ip_mr_forward(net, mrt, skb, c, 0);
 	}
 }
 
@@ -613,7 +856,7 @@ static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
 *	Called under mrt_lock.
 */
 
-static int ipmr_cache_report(struct net *net,
+static int ipmr_cache_report(struct mr_table *mrt,
 			     struct sk_buff *pkt, vifi_t vifi, int assert)
 {
 	struct sk_buff *skb;
@@ -646,7 +889,7 @@ static int ipmr_cache_report(struct net *net,
 		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
 		msg->im_msgtype = IGMPMSG_WHOLEPKT;
 		msg->im_mbz = 0;
-		msg->im_vif = net->ipv4.mroute_reg_vif_num;
+		msg->im_vif = mrt->mroute_reg_vif_num;
 		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
 		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
 					     sizeof(struct iphdr));
@@ -678,7 +921,7 @@ static int ipmr_cache_report(struct net *net,
 		skb->transport_header = skb->network_header;
 	}
 
-	if (net->ipv4.mroute_sk == NULL) {
+	if (mrt->mroute_sk == NULL) {
 		kfree_skb(skb);
 		return -EINVAL;
 	}
@@ -686,7 +929,7 @@ static int ipmr_cache_report(struct net *net,
 	/*
 	 *	Deliver to mrouted
 	 */
-	ret = sock_queue_rcv_skb(net->ipv4.mroute_sk, skb);
+	ret = sock_queue_rcv_skb(mrt->mroute_sk, skb);
 	if (ret < 0) {
 		if (net_ratelimit())
 			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
@@ -701,27 +944,29 @@ static int ipmr_cache_report(struct net *net,
 */
 
 static int
-ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
+ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi, struct sk_buff *skb)
 {
+	bool found = false;
 	int err;
 	struct mfc_cache *c;
 	const struct iphdr *iph = ip_hdr(skb);
 
 	spin_lock_bh(&mfc_unres_lock);
-	for (c=mfc_unres_queue; c; c=c->next) {
-		if (net_eq(mfc_net(c), net) &&
-		    c->mfc_mcastgrp == iph->daddr &&
-		    c->mfc_origin == iph->saddr)
-			break;
+	list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
+		if (c->mfc_mcastgrp == iph->daddr &&
+		    c->mfc_origin == iph->saddr) {
+			found = true;
+			break;
+		}
 	}
 
-	if (c == NULL) {
+	if (!found) {
 		/*
 		 *	Create a new entry if allowable
 		 */
 
-		if (atomic_read(&net->ipv4.cache_resolve_queue_len) >= 10 ||
-		    (c = ipmr_cache_alloc_unres(net)) == NULL) {
+		if (atomic_read(&mrt->cache_resolve_queue_len) >= 10 ||
+		    (c = ipmr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);
 
 			kfree_skb(skb);
@@ -738,7 +983,7 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 		/*
 		 *	Reflect first query at mrouted.
 		 */
-		err = ipmr_cache_report(net, skb, vifi, IGMPMSG_NOCACHE);
+		err = ipmr_cache_report(mrt, skb, vifi, IGMPMSG_NOCACHE);
 		if (err < 0) {
 			/* If the report failed throw the cache entry
 			   out - Brad Parker
@@ -750,11 +995,10 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 			return err;
 		}
 
-		atomic_inc(&net->ipv4.cache_resolve_queue_len);
-		c->next = mfc_unres_queue;
-		mfc_unres_queue = c;
+		atomic_inc(&mrt->cache_resolve_queue_len);
+		list_add(&c->list, &mrt->mfc_unres_queue);
 
-		mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
+		mod_timer(&mrt->ipmr_expire_timer, c->mfc_un.unres.expires);
 	}
 
 	/*
@@ -776,19 +1020,18 @@ ipmr_cache_unresolved(struct net *net, vifi_t vifi, struct sk_buff *skb)
 *	MFC cache manipulation by user space mroute daemon
 */
 
-static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
+static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc)
 {
 	int line;
-	struct mfc_cache *c, **cp;
+	struct mfc_cache *c, *next;
 
 	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
-	for (cp = &net->ipv4.mfc_cache_array[line];
-	     (c = *cp) != NULL; cp = &c->next) {
+	list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
 		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
 			write_lock_bh(&mrt_lock);
-			*cp = c->next;
+			list_del(&c->list);
 			write_unlock_bh(&mrt_lock);
 
 			ipmr_cache_free(c);
@@ -798,27 +1041,30 @@ static int ipmr_mfc_delete(struct net *net, struct mfcctl *mfc)
 	return -ENOENT;
 }
 
-static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
+static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
+			struct mfcctl *mfc, int mrtsock)
 {
+	bool found = false;
 	int line;
-	struct mfc_cache *uc, *c, **cp;
+	struct mfc_cache *uc, *c;
 
 	if (mfc->mfcc_parent >= MAXVIFS)
 		return -ENFILE;
 
 	line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
 
-	for (cp = &net->ipv4.mfc_cache_array[line];
-	     (c = *cp) != NULL; cp = &c->next) {
+	list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
 		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
-		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
+		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
+			found = true;
 			break;
+		}
 	}
 
-	if (c != NULL) {
+	if (found) {
 		write_lock_bh(&mrt_lock);
 		c->mfc_parent = mfc->mfcc_parent;
-		ipmr_update_thresholds(c, mfc->mfcc_ttls);
+		ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
 		if (!mrtsock)
 			c->mfc_flags |= MFC_STATIC;
 		write_unlock_bh(&mrt_lock);
@@ -828,43 +1074,42 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 	if (!ipv4_is_multicast(mfc->mfcc_mcastgrp.s_addr))
 		return -EINVAL;
 
-	c = ipmr_cache_alloc(net);
+	c = ipmr_cache_alloc();
 	if (c == NULL)
 		return -ENOMEM;
 
 	c->mfc_origin = mfc->mfcc_origin.s_addr;
 	c->mfc_mcastgrp = mfc->mfcc_mcastgrp.s_addr;
 	c->mfc_parent = mfc->mfcc_parent;
-	ipmr_update_thresholds(c, mfc->mfcc_ttls);
+	ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
 	if (!mrtsock)
 		c->mfc_flags |= MFC_STATIC;
 
 	write_lock_bh(&mrt_lock);
-	c->next = net->ipv4.mfc_cache_array[line];
-	net->ipv4.mfc_cache_array[line] = c;
+	list_add(&c->list, &mrt->mfc_cache_array[line]);
 	write_unlock_bh(&mrt_lock);
 
 	/*
 	 *	Check to see if we resolved a queued list. If so we
 	 *	need to send on the frames and tidy up.
 	 */
+	found = false;
 	spin_lock_bh(&mfc_unres_lock);
-	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
-	     cp = &uc->next) {
-		if (net_eq(mfc_net(uc), net) &&
-		    uc->mfc_origin == c->mfc_origin &&
+	list_for_each_entry(uc, &mrt->mfc_unres_queue, list) {
+		if (uc->mfc_origin == c->mfc_origin &&
 		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
-			*cp = uc->next;
-			atomic_dec(&net->ipv4.cache_resolve_queue_len);
+			list_del(&uc->list);
+			atomic_dec(&mrt->cache_resolve_queue_len);
+			found = true;
 			break;
 		}
 	}
-	if (mfc_unres_queue == NULL)
-		del_timer(&ipmr_expire_timer);
+	if (list_empty(&mrt->mfc_unres_queue))
+		del_timer(&mrt->ipmr_expire_timer);
 	spin_unlock_bh(&mfc_unres_lock);
 
-	if (uc) {
-		ipmr_cache_resolve(uc, c);
+	if (found) {
+		ipmr_cache_resolve(net, mrt, uc, c);
 		ipmr_cache_free(uc);
 	}
 	return 0;
@@ -874,53 +1119,41 @@ static int ipmr_mfc_add(struct net *net, struct mfcctl *mfc, int mrtsock)
 *	Close the multicast socket, and clear the vif tables etc
 */
 
-static void mroute_clean_tables(struct net *net)
+static void mroute_clean_tables(struct mr_table *mrt)
 {
 	int i;
 	LIST_HEAD(list);
+	struct mfc_cache *c, *next;
 
 	/*
 	 *	Shut down all active vif entries
 	 */
-	for (i = 0; i < net->ipv4.maxvif; i++) {
-		if (!(net->ipv4.vif_table[i].flags&VIFF_STATIC))
-			vif_delete(net, i, 0, &list);
+	for (i = 0; i < mrt->maxvif; i++) {
+		if (!(mrt->vif_table[i].flags&VIFF_STATIC))
+			vif_delete(mrt, i, 0, &list);
 	}
 	unregister_netdevice_many(&list);
 
 	/*
 	 *	Wipe the cache
 	 */
-	for (i=0; i<MFC_LINES; i++) {
-		struct mfc_cache *c, **cp;
-
-		cp = &net->ipv4.mfc_cache_array[i];
-		while ((c = *cp) != NULL) {
-			if (c->mfc_flags&MFC_STATIC) {
-				cp = &c->next;
+	for (i = 0; i < MFC_LINES; i++) {
+		list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
+			if (c->mfc_flags&MFC_STATIC)
 				continue;
-			}
 			write_lock_bh(&mrt_lock);
-			*cp = c->next;
+			list_del(&c->list);
 			write_unlock_bh(&mrt_lock);
 
 			ipmr_cache_free(c);
 		}
 	}
 
-	if (atomic_read(&net->ipv4.cache_resolve_queue_len) != 0) {
-		struct mfc_cache *c, **cp;
-
+	if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
 		spin_lock_bh(&mfc_unres_lock);
-		cp = &mfc_unres_queue;
-		while ((c = *cp) != NULL) {
-			if (!net_eq(mfc_net(c), net)) {
-				cp = &c->next;
-				continue;
-			}
-			*cp = c->next;
-
-			ipmr_destroy_unres(c);
+		list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
+			list_del(&c->list);
+			ipmr_destroy_unres(mrt, c);
 		}
 		spin_unlock_bh(&mfc_unres_lock);
 	}
@@ -929,16 +1162,19 @@ static void mroute_clean_tables(struct net *net)
 static void mrtsock_destruct(struct sock *sk)
 {
 	struct net *net = sock_net(sk);
+	struct mr_table *mrt;
 
 	rtnl_lock();
-	if (sk == net->ipv4.mroute_sk) {
-		IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
+	ipmr_for_each_table(mrt, net) {
+		if (sk == mrt->mroute_sk) {
+			IPV4_DEVCONF_ALL(net, MC_FORWARDING)--;
 
-		write_lock_bh(&mrt_lock);
-		net->ipv4.mroute_sk = NULL;
-		write_unlock_bh(&mrt_lock);
+			write_lock_bh(&mrt_lock);
+			mrt->mroute_sk = NULL;
+			write_unlock_bh(&mrt_lock);
 
-		mroute_clean_tables(net);
+			mroute_clean_tables(mrt);
+		}
 	}
 	rtnl_unlock();
 }
@@ -956,9 +1192,14 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 	struct vifctl vif;
 	struct mfcctl mfc;
 	struct net *net = sock_net(sk);
+	struct mr_table *mrt;
+
+	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return -ENOENT;
 
 	if (optname != MRT_INIT) {
-		if (sk != net->ipv4.mroute_sk && !capable(CAP_NET_ADMIN))
+		if (sk != mrt->mroute_sk && !capable(CAP_NET_ADMIN))
 			return -EACCES;
 	}
 
@@ -971,7 +1212,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 			return -ENOPROTOOPT;
 
 		rtnl_lock();
-		if (net->ipv4.mroute_sk) {
+		if (mrt->mroute_sk) {
 			rtnl_unlock();
 			return -EADDRINUSE;
 		}
@@ -979,7 +1220,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 		ret = ip_ra_control(sk, 1, mrtsock_destruct);
 		if (ret == 0) {
 			write_lock_bh(&mrt_lock);
-			net->ipv4.mroute_sk = sk;
+			mrt->mroute_sk = sk;
 			write_unlock_bh(&mrt_lock);
 
 			IPV4_DEVCONF_ALL(net, MC_FORWARDING)++;
@@ -987,7 +1228,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 		rtnl_unlock();
 		return ret;
 	case MRT_DONE:
-		if (sk != net->ipv4.mroute_sk)
+		if (sk != mrt->mroute_sk)
 			return -EACCES;
 		return ip_ra_control(sk, 0, NULL);
 	case MRT_ADD_VIF:
@@ -1000,9 +1241,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 			return -ENFILE;
 		rtnl_lock();
 		if (optname == MRT_ADD_VIF) {
-			ret = vif_add(net, &vif, sk == net->ipv4.mroute_sk);
+			ret = vif_add(net, mrt, &vif, sk == mrt->mroute_sk);
 		} else {
-			ret = vif_delete(net, vif.vifc_vifi, 0, NULL);
+			ret = vif_delete(mrt, vif.vifc_vifi, 0, NULL);
 		}
 		rtnl_unlock();
 		return ret;
@@ -1019,9 +1260,9 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 			return -EFAULT;
 		rtnl_lock();
 		if (optname == MRT_DEL_MFC)
-			ret = ipmr_mfc_delete(net, &mfc);
+			ret = ipmr_mfc_delete(mrt, &mfc);
 		else
-			ret = ipmr_mfc_add(net, &mfc, sk == net->ipv4.mroute_sk);
+			ret = ipmr_mfc_add(net, mrt, &mfc, sk == mrt->mroute_sk);
 		rtnl_unlock();
 		return ret;
 	/*
@@ -1032,7 +1273,7 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 		int v;
 		if (get_user(v,(int __user *)optval))
 			return -EFAULT;
-		net->ipv4.mroute_do_assert = (v) ? 1 : 0;
+		mrt->mroute_do_assert = (v) ? 1 : 0;
 		return 0;
 	}
 #ifdef CONFIG_IP_PIMSM
@@ -1046,14 +1287,35 @@ int ip_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, unsi
 
 		rtnl_lock();
 		ret = 0;
-		if (v != net->ipv4.mroute_do_pim) {
-			net->ipv4.mroute_do_pim = v;
-			net->ipv4.mroute_do_assert = v;
+		if (v != mrt->mroute_do_pim) {
+			mrt->mroute_do_pim = v;
+			mrt->mroute_do_assert = v;
 		}
 		rtnl_unlock();
 		return ret;
 	}
 #endif
+#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
+	case MRT_TABLE:
+	{
+		u32 v;
+
+		if (optlen != sizeof(u32))
+			return -EINVAL;
+		if (get_user(v, (u32 __user *)optval))
+			return -EFAULT;
+		if (sk == mrt->mroute_sk)
+			return -EBUSY;
+
+		rtnl_lock();
+		ret = 0;
+		if (!ipmr_new_table(net, v))
+			ret = -ENOMEM;
+		raw_sk(sk)->ipmr_table = v;
+		rtnl_unlock();
+		return ret;
+	}
+#endif
 	/*
 	 * Spurious command, or MRT_VERSION which you cannot
 	 * set.
@@ -1072,6 +1334,11 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
 	int olr;
 	int val;
 	struct net *net = sock_net(sk);
+	struct mr_table *mrt;
+
+	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return -ENOENT;
 
 	if (optname != MRT_VERSION &&
 #ifdef CONFIG_IP_PIMSM
@@ -1093,10 +1360,10 @@ int ip_mroute_getsockopt(struct sock *sk, int optname, char __user *optval, int
 		val = 0x0305;
 #ifdef CONFIG_IP_PIMSM
 	else if (optname == MRT_PIM)
-		val = net->ipv4.mroute_do_pim;
+		val = mrt->mroute_do_pim;
 #endif
 	else
-		val = net->ipv4.mroute_do_assert;
+		val = mrt->mroute_do_assert;
 	if (copy_to_user(optval, &val, olr))
 		return -EFAULT;
 	return 0;
@@ -1113,16 +1380,21 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 	struct vif_device *vif;
 	struct mfc_cache *c;
 	struct net *net = sock_net(sk);
+	struct mr_table *mrt;
+
+	mrt = ipmr_get_table(net, raw_sk(sk)->ipmr_table ? : RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return -ENOENT;
 
 	switch (cmd) {
 	case SIOCGETVIFCNT:
 		if (copy_from_user(&vr, arg, sizeof(vr)))
 			return -EFAULT;
-		if (vr.vifi >= net->ipv4.maxvif)
+		if (vr.vifi >= mrt->maxvif)
 			return -EINVAL;
 		read_lock(&mrt_lock);
-		vif = &net->ipv4.vif_table[vr.vifi];
-		if (VIF_EXISTS(net, vr.vifi)) {
+		vif = &mrt->vif_table[vr.vifi];
+		if (VIF_EXISTS(mrt, vr.vifi)) {
 			vr.icount = vif->pkt_in;
 			vr.ocount = vif->pkt_out;
 			vr.ibytes = vif->bytes_in;
@@ -1140,7 +1412,7 @@ int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
 			return -EFAULT;
 
 		read_lock(&mrt_lock);
-		c = ipmr_cache_find(net, sr.src.s_addr, sr.grp.s_addr);
+		c = ipmr_cache_find(mrt, sr.src.s_addr, sr.grp.s_addr);
 		if (c) {
 			sr.pktcnt = c->mfc_un.res.pkt;
 			sr.bytecnt = c->mfc_un.res.bytes;
@@ -1163,16 +1435,20 @@ static int ipmr_device_event(struct notifier_block *this, unsigned long event, v
 {
 	struct net_device *dev = ptr;
 	struct net *net = dev_net(dev);
+	struct mr_table *mrt;
 	struct vif_device *v;
 	int ct;
 	LIST_HEAD(list);
 
 	if (event != NETDEV_UNREGISTER)
 		return NOTIFY_DONE;
-	v = &net->ipv4.vif_table[0];
-	for (ct = 0; ct < net->ipv4.maxvif; ct++, v++) {
-		if (v->dev == dev)
-			vif_delete(net, ct, 1, &list);
+
+	ipmr_for_each_table(mrt, net) {
+		v = &mrt->vif_table[0];
+		for (ct = 0; ct < mrt->maxvif; ct++, v++) {
+			if (v->dev == dev)
+				vif_delete(mrt, ct, 1, &list);
+		}
 	}
 	unregister_netdevice_many(&list);
 	return NOTIFY_DONE;
@@ -1231,11 +1507,11 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
 *	Processing handlers for ipmr_forward
 */

-static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
+static void ipmr_queue_xmit(struct net *net, struct mr_table *mrt,
+			    struct sk_buff *skb, struct mfc_cache *c, int vifi)
 {
-	struct net *net = mfc_net(c);
 	const struct iphdr *iph = ip_hdr(skb);
-	struct vif_device *vif = &net->ipv4.vif_table[vifi];
+	struct vif_device *vif = &mrt->vif_table[vifi];
 	struct net_device *dev;
 	struct rtable *rt;
 	int encap = 0;
@@ -1249,7 +1525,7 @@ static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
 		vif->bytes_out += skb->len;
 		vif->dev->stats.tx_bytes += skb->len;
 		vif->dev->stats.tx_packets++;
-		ipmr_cache_report(net, skb, vifi, IGMPMSG_WHOLEPKT);
+		ipmr_cache_report(mrt, skb, vifi, IGMPMSG_WHOLEPKT);
 		goto out_free;
 	}
 #endif
@@ -1332,12 +1608,12 @@ out_free:
 	return;
 }
 
-static int ipmr_find_vif(struct net_device *dev)
+static int ipmr_find_vif(struct mr_table *mrt, struct net_device *dev)
 {
-	struct net *net = dev_net(dev);
 	int ct;
-	for (ct = net->ipv4.maxvif-1; ct >= 0; ct--) {
-		if (net->ipv4.vif_table[ct].dev == dev)
+
+	for (ct = mrt->maxvif-1; ct >= 0; ct--) {
+		if (mrt->vif_table[ct].dev == dev)
 			break;
 	}
 	return ct;
@@ -1345,11 +1621,12 @@ static int ipmr_find_vif(struct net_device *dev)
 
 /* "local" means that we should preserve one skb (for local delivery) */
 
-static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
+static int ip_mr_forward(struct net *net, struct mr_table *mrt,
+			 struct sk_buff *skb, struct mfc_cache *cache,
+			 int local)
 {
 	int psend = -1;
 	int vif, ct;
-	struct net *net = mfc_net(cache);
 
 	vif = cache->mfc_parent;
 	cache->mfc_un.res.pkt++;
@@ -1358,7 +1635,7 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 	/*
 	 * Wrong interface: drop packet and (maybe) send PIM assert.
 	 */
-	if (net->ipv4.vif_table[vif].dev != skb->dev) {
+	if (mrt->vif_table[vif].dev != skb->dev) {
 		int true_vifi;
 
 		if (skb_rtable(skb)->fl.iif == 0) {
@@ -1377,26 +1654,26 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 		}
 
 		cache->mfc_un.res.wrong_if++;
-		true_vifi = ipmr_find_vif(skb->dev);
+		true_vifi = ipmr_find_vif(mrt, skb->dev);
 
-		if (true_vifi >= 0 && net->ipv4.mroute_do_assert &&
+		if (true_vifi >= 0 && mrt->mroute_do_assert &&
 		    /* pimsm uses asserts, when switching from RPT to SPT,
 		       so that we cannot check that packet arrived on an oif.
 		       It is bad, but otherwise we would need to move pretty
 		       large chunk of pimd to kernel. Ough... --ANK
 		     */
-		    (net->ipv4.mroute_do_pim ||
+		    (mrt->mroute_do_pim ||
 		     cache->mfc_un.res.ttls[true_vifi] < 255) &&
 		    time_after(jiffies,
 			       cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
 			cache->mfc_un.res.last_assert = jiffies;
-			ipmr_cache_report(net, skb, true_vifi, IGMPMSG_WRONGVIF);
+			ipmr_cache_report(mrt, skb, true_vifi, IGMPMSG_WRONGVIF);
 		}
 		goto dont_forward;
 	}
 
-	net->ipv4.vif_table[vif].pkt_in++;
-	net->ipv4.vif_table[vif].bytes_in += skb->len;
+	mrt->vif_table[vif].pkt_in++;
+	mrt->vif_table[vif].bytes_in += skb->len;
 
 	/*
 	 *	Forward the frame
@@ -1406,7 +1683,8 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 			if (psend != -1) {
 				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 				if (skb2)
-					ipmr_queue_xmit(skb2, cache, psend);
+					ipmr_queue_xmit(net, mrt, skb2, cache,
+							psend);
 			}
 			psend = ct;
 		}
@@ -1415,9 +1693,9 @@ static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local
 		if (local) {
 			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
 			if (skb2)
-				ipmr_queue_xmit(skb2, cache, psend);
+				ipmr_queue_xmit(net, mrt, skb2, cache, psend);
 		} else {
-			ipmr_queue_xmit(skb, cache, psend);
+			ipmr_queue_xmit(net, mrt, skb, cache, psend);
 			return 0;
 		}
 	}
@@ -1438,6 +1716,8 @@ int ip_mr_input(struct sk_buff *skb)
 	struct mfc_cache *cache;
 	struct net *net = dev_net(skb->dev);
 	int local = skb_rtable(skb)->rt_flags & RTCF_LOCAL;
+	struct mr_table *mrt;
+	int err;
 
 	/* Packet is looped back after forward, it should not be
 	   forwarded second time, but still can be delivered locally.
@@ -1445,6 +1725,10 @@ int ip_mr_input(struct sk_buff *skb)
 	if (IPCB(skb)->flags&IPSKB_FORWARDED)
 		goto dont_forward;
 
+	err = ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt);
+	if (err < 0)
+		return err;
+
 	if (!local) {
 		if (IPCB(skb)->opt.router_alert) {
 			if (ip_call_ra_chain(skb))
@@ -1457,9 +1741,9 @@ int ip_mr_input(struct sk_buff *skb)
 		   that we can forward NO IGMP messages.
 		 */
 		read_lock(&mrt_lock);
-		if (net->ipv4.mroute_sk) {
+		if (mrt->mroute_sk) {
 			nf_reset(skb);
-			raw_rcv(net->ipv4.mroute_sk, skb);
+			raw_rcv(mrt->mroute_sk, skb);
 			read_unlock(&mrt_lock);
 			return 0;
 		}
@@ -1468,7 +1752,7 @@ int ip_mr_input(struct sk_buff *skb)
 	}
 
 	read_lock(&mrt_lock);
-	cache = ipmr_cache_find(net, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+	cache = ipmr_cache_find(mrt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
 
 	/*
 	 *	No usable cache entry
@@ -1486,19 +1770,19 @@ int ip_mr_input(struct sk_buff *skb)
 			skb = skb2;
 		}
 
-		vif = ipmr_find_vif(skb->dev);
+		vif = ipmr_find_vif(mrt, skb->dev);
 		if (vif >= 0) {
-			int err = ipmr_cache_unresolved(net, vif, skb);
+			int err2 = ipmr_cache_unresolved(mrt, vif, skb);
 			read_unlock(&mrt_lock);
 
-			return err;
+			return err2;
 		}
 		read_unlock(&mrt_lock);
 		kfree_skb(skb);
 		return -ENODEV;
 	}
 
-	ip_mr_forward(skb, cache, local);
+	ip_mr_forward(net, mrt, skb, cache, local);
 
 	read_unlock(&mrt_lock);
 
@@ -1515,11 +1799,11 @@ dont_forward:
 }
 
 #ifdef CONFIG_IP_PIMSM
-static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
+static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
+		     unsigned int pimlen)
 {
 	struct net_device *reg_dev = NULL;
 	struct iphdr *encap;
-	struct net *net = dev_net(skb->dev);
 
 	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
 	/*
@@ -1534,8 +1818,8 @@ static int __pim_rcv(struct sk_buff *skb, unsigned int pimlen)
 		return 1;
 
 	read_lock(&mrt_lock);
-	if (net->ipv4.mroute_reg_vif_num >= 0)
-		reg_dev = net->ipv4.vif_table[net->ipv4.mroute_reg_vif_num].dev;
+	if (mrt->mroute_reg_vif_num >= 0)
+		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
 	if (reg_dev)
 		dev_hold(reg_dev);
 	read_unlock(&mrt_lock);
@@ -1570,17 +1854,21 @@ int pim_rcv_v1(struct sk_buff * skb)
 {
 	struct igmphdr *pim;
 	struct net *net = dev_net(skb->dev);
+	struct mr_table *mrt;
 
 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
 		goto drop;
 
 	pim = igmp_hdr(skb);
 
-	if (!net->ipv4.mroute_do_pim ||
+	if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+		goto drop;
+
+	if (!mrt->mroute_do_pim ||
 	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
 		goto drop;
 
-	if (__pim_rcv(skb, sizeof(*pim))) {
+	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
 drop:
 		kfree_skb(skb);
 	}
@@ -1592,6 +1880,8 @@ drop:
 static int pim_rcv(struct sk_buff * skb)
 {
 	struct pimreghdr *pim;
+	struct net *net = dev_net(skb->dev);
+	struct mr_table *mrt;
 
 	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(struct iphdr)))
 		goto drop;
@@ -1603,7 +1893,10 @@ static int pim_rcv(struct sk_buff * skb)
 	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
 		goto drop;
 
-	if (__pim_rcv(skb, sizeof(*pim))) {
+	if (ipmr_fib_lookup(net, &skb_rtable(skb)->fl, &mrt) < 0)
+		goto drop;
+
+	if (__pim_rcv(mrt, skb, sizeof(*pim))) {
drop:
 		kfree_skb(skb);
 	}
@@ -1612,11 +1905,11 @@ drop:
 #endif
 
 static int
-ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
+ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb, struct mfc_cache *c,
+		 struct rtmsg *rtm)
 {
 	int ct;
 	struct rtnexthop *nhp;
-	struct net *net = mfc_net(c);
 	u8 *b = skb_tail_pointer(skb);
 	struct rtattr *mp_head;
 
@@ -1624,19 +1917,19 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
 	if (c->mfc_parent > MAXVIFS)
 		return -ENOENT;
 
-	if (VIF_EXISTS(net, c->mfc_parent))
-		RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex);
+	if (VIF_EXISTS(mrt, c->mfc_parent))
+		RTA_PUT(skb, RTA_IIF, 4, &mrt->vif_table[c->mfc_parent].dev->ifindex);
 
 	mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
 
 	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
-		if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
+		if (VIF_EXISTS(mrt, ct) && c->mfc_un.res.ttls[ct] < 255) {
 			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
 				goto rtattr_failure;
 			nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
 			nhp->rtnh_flags = 0;
 			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
-			nhp->rtnh_ifindex = net->ipv4.vif_table[ct].dev->ifindex;
+			nhp->rtnh_ifindex = mrt->vif_table[ct].dev->ifindex;
 			nhp->rtnh_len = sizeof(*nhp);
 		}
 	}
@@ -1654,11 +1947,16 @@ int ipmr_get_route(struct net *net,
 		   struct sk_buff *skb, struct rtmsg *rtm, int nowait)
 {
 	int err;
+	struct mr_table *mrt;
 	struct mfc_cache *cache;
 	struct rtable *rt = skb_rtable(skb);
 
+	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return -ENOENT;
+
 	read_lock(&mrt_lock);
-	cache = ipmr_cache_find(net, rt->rt_src, rt->rt_dst);
+	cache = ipmr_cache_find(mrt, rt->rt_src, rt->rt_dst);
 
 	if (cache == NULL) {
 		struct sk_buff *skb2;
@@ -1672,7 +1970,7 @@ int ipmr_get_route(struct net *net,
 		}
 
 		dev = skb->dev;
-		if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
+		if (dev == NULL || (vif = ipmr_find_vif(mrt, dev)) < 0) {
 			read_unlock(&mrt_lock);
 			return -ENODEV;
 		}
@@ -1689,14 +1987,14 @@ int ipmr_get_route(struct net *net,
 		iph->saddr = rt->rt_src;
 		iph->daddr = rt->rt_dst;
 		iph->version = 0;
-		err = ipmr_cache_unresolved(net, vif, skb2);
+		err = ipmr_cache_unresolved(mrt, vif, skb2);
 		read_unlock(&mrt_lock);
 		return err;
 	}
 
 	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
 		cache->mfc_flags |= MFC_NOTIFY;
-	err = ipmr_fill_mroute(skb, cache, rtm);
+	err = ipmr_fill_mroute(mrt, skb, cache, rtm);
 	read_unlock(&mrt_lock);
 	return err;
 }
@@ -1707,6 +2005,7 @@ int ipmr_get_route(struct net *net,
 */
 struct ipmr_vif_iter {
 	struct seq_net_private p;
+	struct mr_table *mrt;
 	int ct;
 };
 
@@ -1714,11 +2013,13 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
 					   struct ipmr_vif_iter *iter,
 					   loff_t pos)
 {
-	for (iter->ct = 0; iter->ct < net->ipv4.maxvif; ++iter->ct) {
-		if (!VIF_EXISTS(net, iter->ct))
+	struct mr_table *mrt = iter->mrt;
+
+	for (iter->ct = 0; iter->ct < mrt->maxvif; ++iter->ct) {
+		if (!VIF_EXISTS(mrt, iter->ct))
 			continue;
 		if (pos-- == 0)
-			return &net->ipv4.vif_table[iter->ct];
+			return &mrt->vif_table[iter->ct];
 	}
 	return NULL;
 }
@@ -1726,7 +2027,15 @@ static struct vif_device *ipmr_vif_seq_idx(struct net *net,
 static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
 	__acquires(mrt_lock)
 {
+	struct ipmr_vif_iter *iter = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt;
+
+	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return ERR_PTR(-ENOENT);
+
+	iter->mrt = mrt;
 
 	read_lock(&mrt_lock);
 	return *pos ? ipmr_vif_seq_idx(net, seq->private, *pos - 1)
@@ -1737,15 +2046,16 @@ static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	struct ipmr_vif_iter *iter = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt = iter->mrt;
 
 	++*pos;
 	if (v == SEQ_START_TOKEN)
 		return ipmr_vif_seq_idx(net, iter, 0);
 
-	while (++iter->ct < net->ipv4.maxvif) {
-		if (!VIF_EXISTS(net, iter->ct))
+	while (++iter->ct < mrt->maxvif) {
+		if (!VIF_EXISTS(mrt, iter->ct))
 			continue;
-		return &net->ipv4.vif_table[iter->ct];
+		return &mrt->vif_table[iter->ct];
 	}
 	return NULL;
 }
@@ -1758,7 +2068,8 @@ static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
 
 static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
 {
-	struct net *net = seq_file_net(seq);
+	struct ipmr_vif_iter *iter = seq->private;
+	struct mr_table *mrt = iter->mrt;
 
 	if (v == SEQ_START_TOKEN) {
 		seq_puts(seq,
@@ -1769,7 +2080,7 @@ static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
 
 		seq_printf(seq,
 			   "%2Zd %-10s %8ld %7ld %8ld %7ld %05X %08X %08X\n",
-			   vif - net->ipv4.vif_table,
+			   vif - mrt->vif_table,
 			   name, vif->bytes_in, vif->pkt_in,
 			   vif->bytes_out, vif->pkt_out,
 			   vif->flags, vif->local, vif->remote);
@@ -1800,7 +2111,8 @@ static const struct file_operations ipmr_vif_fops = {
 
 struct ipmr_mfc_iter {
 	struct seq_net_private p;
-	struct mfc_cache **cache;
+	struct mr_table *mrt;
+	struct list_head *cache;
 	int ct;
 };
 
@@ -1808,22 +2120,22 @@ struct ipmr_mfc_iter {
 static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
 					  struct ipmr_mfc_iter *it, loff_t pos)
 {
+	struct mr_table *mrt = it->mrt;
 	struct mfc_cache *mfc;
 
-	it->cache = net->ipv4.mfc_cache_array;
 	read_lock(&mrt_lock);
-	for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
-		for (mfc = net->ipv4.mfc_cache_array[it->ct];
-		     mfc; mfc = mfc->next)
+	for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
+		it->cache = &mrt->mfc_cache_array[it->ct];
+		list_for_each_entry(mfc, it->cache, list)
 			if (pos-- == 0)
 				return mfc;
+	}
 	read_unlock(&mrt_lock);
 
-	it->cache = &mfc_unres_queue;
 	spin_lock_bh(&mfc_unres_lock);
-	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
-		if (net_eq(mfc_net(mfc), net) &&
-		    pos-- == 0)
+	it->cache = &mrt->mfc_unres_queue;
+	list_for_each_entry(mfc, it->cache, list)
+		if (pos-- == 0)
 			return mfc;
 	spin_unlock_bh(&mfc_unres_lock);
 
@@ -1836,7 +2148,13 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	struct ipmr_mfc_iter *it = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt;
+
+	mrt = ipmr_get_table(net, RT_TABLE_DEFAULT);
+	if (mrt == NULL)
+		return ERR_PTR(-ENOENT);
 
+	it->mrt = mrt;
 	it->cache = NULL;
 	it->ct = 0;
 	return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
@@ -1848,37 +2166,36 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	struct mfc_cache *mfc = v;
 	struct ipmr_mfc_iter *it = seq->private;
 	struct net *net = seq_file_net(seq);
+	struct mr_table *mrt = it->mrt;
 
 	++*pos;
 
 	if (v == SEQ_START_TOKEN)
 		return ipmr_mfc_seq_idx(net, seq->private, 0);
 
-	if (mfc->next)
-		return mfc->next;
+	if (mfc->list.next != it->cache)
+		return list_entry(mfc->list.next, struct mfc_cache, list);
 
-	if (it->cache == &mfc_unres_queue)
+	if (it->cache == &mrt->mfc_unres_queue)
 		goto end_of_list;
 
-	BUG_ON(it->cache != net->ipv4.mfc_cache_array);
+	BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
 
 	while (++it->ct < MFC_LINES) {
-		mfc = net->ipv4.mfc_cache_array[it->ct];
-		if (mfc)
-			return mfc;
+		it->cache = &mrt->mfc_cache_array[it->ct];
+		if (list_empty(it->cache))
+			continue;
+		return list_first_entry(it->cache, struct mfc_cache, list);
 	}
 
 	/* exhausted cache_array, show unresolved */
 	read_unlock(&mrt_lock);
-	it->cache = &mfc_unres_queue;
+	it->cache = &mrt->mfc_unres_queue;
 	it->ct = 0;
 
 	spin_lock_bh(&mfc_unres_lock);
-	mfc = mfc_unres_queue;
-	while (mfc && !net_eq(mfc_net(mfc), net))
-		mfc = mfc->next;
-	if (mfc)
-		return mfc;
+	if (!list_empty(it->cache))
+		return list_first_entry(it->cache, struct mfc_cache, list);
 
 end_of_list:
 	spin_unlock_bh(&mfc_unres_lock);
@@ -1890,18 +2207,17 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
1891{ 2208{
1892 struct ipmr_mfc_iter *it = seq->private; 2209 struct ipmr_mfc_iter *it = seq->private;
1893 struct net *net = seq_file_net(seq); 2210 struct mr_table *mrt = it->mrt;
1894 2211
1895 if (it->cache == &mfc_unres_queue) 2212 if (it->cache == &mrt->mfc_unres_queue)
1896 spin_unlock_bh(&mfc_unres_lock); 2213 spin_unlock_bh(&mfc_unres_lock);
1897 else if (it->cache == net->ipv4.mfc_cache_array) 2214 else if (it->cache == &mrt->mfc_cache_array[it->ct])
1898 read_unlock(&mrt_lock); 2215 read_unlock(&mrt_lock);
1899} 2216}
1900 2217
1901static int ipmr_mfc_seq_show(struct seq_file *seq, void *v) 2218static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1902{ 2219{
1903 int n; 2220 int n;
1904 struct net *net = seq_file_net(seq);
1905 2221
1906 if (v == SEQ_START_TOKEN) { 2222 if (v == SEQ_START_TOKEN) {
1907 seq_puts(seq, 2223 seq_puts(seq,
@@ -1909,20 +2225,21 @@ static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
1909 } else { 2225 } else {
1910 const struct mfc_cache *mfc = v; 2226 const struct mfc_cache *mfc = v;
1911 const struct ipmr_mfc_iter *it = seq->private; 2227 const struct ipmr_mfc_iter *it = seq->private;
2228 const struct mr_table *mrt = it->mrt;
1912 2229
1913 seq_printf(seq, "%08lX %08lX %-3hd", 2230 seq_printf(seq, "%08X %08X %-3hd",
1914 (unsigned long) mfc->mfc_mcastgrp, 2231 (__force u32) mfc->mfc_mcastgrp,
1915 (unsigned long) mfc->mfc_origin, 2232 (__force u32) mfc->mfc_origin,
1916 mfc->mfc_parent); 2233 mfc->mfc_parent);
1917 2234
1918 if (it->cache != &mfc_unres_queue) { 2235 if (it->cache != &mrt->mfc_unres_queue) {
1919 seq_printf(seq, " %8lu %8lu %8lu", 2236 seq_printf(seq, " %8lu %8lu %8lu",
1920 mfc->mfc_un.res.pkt, 2237 mfc->mfc_un.res.pkt,
1921 mfc->mfc_un.res.bytes, 2238 mfc->mfc_un.res.bytes,
1922 mfc->mfc_un.res.wrong_if); 2239 mfc->mfc_un.res.wrong_if);
1923 for (n = mfc->mfc_un.res.minvif; 2240 for (n = mfc->mfc_un.res.minvif;
1924 n < mfc->mfc_un.res.maxvif; n++ ) { 2241 n < mfc->mfc_un.res.maxvif; n++ ) {
1925 if (VIF_EXISTS(net, n) && 2242 if (VIF_EXISTS(mrt, n) &&
1926 mfc->mfc_un.res.ttls[n] < 255) 2243 mfc->mfc_un.res.ttls[n] < 255)
1927 seq_printf(seq, 2244 seq_printf(seq,
1928 " %2d:%-3d", 2245 " %2d:%-3d",
@@ -1974,27 +2291,11 @@ static const struct net_protocol pim_protocol = {
1974 */ 2291 */
1975static int __net_init ipmr_net_init(struct net *net) 2292static int __net_init ipmr_net_init(struct net *net)
1976{ 2293{
1977 int err = 0; 2294 int err;
1978 2295
1979 net->ipv4.vif_table = kcalloc(MAXVIFS, sizeof(struct vif_device), 2296 err = ipmr_rules_init(net);
1980 GFP_KERNEL); 2297 if (err < 0)
1981 if (!net->ipv4.vif_table) {
1982 err = -ENOMEM;
1983 goto fail; 2298 goto fail;
1984 }
1985
1986 /* Forwarding cache */
1987 net->ipv4.mfc_cache_array = kcalloc(MFC_LINES,
1988 sizeof(struct mfc_cache *),
1989 GFP_KERNEL);
1990 if (!net->ipv4.mfc_cache_array) {
1991 err = -ENOMEM;
1992 goto fail_mfc_cache;
1993 }
1994
1995#ifdef CONFIG_IP_PIMSM
1996 net->ipv4.mroute_reg_vif_num = -1;
1997#endif
1998 2299
1999#ifdef CONFIG_PROC_FS 2300#ifdef CONFIG_PROC_FS
2000 err = -ENOMEM; 2301 err = -ENOMEM;
@@ -2009,10 +2310,8 @@ static int __net_init ipmr_net_init(struct net *net)
2009proc_cache_fail: 2310proc_cache_fail:
2010 proc_net_remove(net, "ip_mr_vif"); 2311 proc_net_remove(net, "ip_mr_vif");
2011proc_vif_fail: 2312proc_vif_fail:
2012 kfree(net->ipv4.mfc_cache_array); 2313 ipmr_rules_exit(net);
2013#endif 2314#endif
2014fail_mfc_cache:
2015 kfree(net->ipv4.vif_table);
2016fail: 2315fail:
2017 return err; 2316 return err;
2018} 2317}
@@ -2023,8 +2322,7 @@ static void __net_exit ipmr_net_exit(struct net *net)
2023 proc_net_remove(net, "ip_mr_cache"); 2322 proc_net_remove(net, "ip_mr_cache");
2024 proc_net_remove(net, "ip_mr_vif"); 2323 proc_net_remove(net, "ip_mr_vif");
2025#endif 2324#endif
2026 kfree(net->ipv4.mfc_cache_array); 2325 ipmr_rules_exit(net);
2027 kfree(net->ipv4.vif_table);
2028} 2326}
2029 2327
2030static struct pernet_operations ipmr_net_ops = { 2328static struct pernet_operations ipmr_net_ops = {
@@ -2047,7 +2345,6 @@ int __init ip_mr_init(void)
2047 if (err) 2345 if (err)
2048 goto reg_pernet_fail; 2346 goto reg_pernet_fail;
2049 2347
2050 setup_timer(&ipmr_expire_timer, ipmr_expire_process, 0);
2051 err = register_netdevice_notifier(&ip_mr_notifier); 2348 err = register_netdevice_notifier(&ip_mr_notifier);
2052 if (err) 2349 if (err)
2053 goto reg_notif_fail; 2350 goto reg_notif_fail;
@@ -2065,7 +2362,6 @@ add_proto_fail:
2065 unregister_netdevice_notifier(&ip_mr_notifier); 2362 unregister_netdevice_notifier(&ip_mr_notifier);
2066#endif 2363#endif
2067reg_notif_fail: 2364reg_notif_fail:
2068 del_timer(&ipmr_expire_timer);
2069 unregister_pernet_subsys(&ipmr_net_ops); 2365 unregister_pernet_subsys(&ipmr_net_ops);
2070reg_pernet_fail: 2366reg_pernet_fail:
2071 kmem_cache_destroy(mrt_cachep); 2367 kmem_cache_destroy(mrt_cachep);
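The ipmr hunks above move the VIF and MFC state from loose fields in struct netns_ipv4 into a per-net struct mr_table, and each MFC hash bucket becomes a struct list_head, so the /proc iterators now use list_for_each_entry() instead of chasing an open-coded mfc->next pointer. A minimal userspace re-creation of the new bucket walk, with the list machinery hand-rolled so the sketch is self-contained (struct and constant names only mirror the kernel's):

    /* Userspace sketch of the list_head bucket walk the converted
     * ipmr iterators perform; everything here is a stand-in. */
    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_entry(ptr, type, member) container_of(ptr, type, member)
    #define list_for_each_entry(pos, head, member)                        \
            for (pos = list_entry((head)->next, typeof(*pos), member);    \
                 &pos->member != (head);                                  \
                 pos = list_entry(pos->member.next, typeof(*pos), member))

    static void list_add_tail_(struct list_head *n, struct list_head *h)
    {
            n->prev = h->prev; n->next = h;
            h->prev->next = n; h->prev = n;
    }

    #define MFC_LINES 4 /* the kernel uses 64; shrunk for the demo */

    struct mfc_cache { unsigned int origin; struct list_head list; };

    int main(void)
    {
            struct list_head mfc_cache_array[MFC_LINES];
            struct mfc_cache a = { .origin = 1 }, b = { .origin = 2 };
            struct mfc_cache *mfc;
            int ct;

            for (ct = 0; ct < MFC_LINES; ct++) {
                    mfc_cache_array[ct].next = &mfc_cache_array[ct];
                    mfc_cache_array[ct].prev = &mfc_cache_array[ct];
            }
            list_add_tail_(&a.list, &mfc_cache_array[0]);
            list_add_tail_(&b.list, &mfc_cache_array[0]);

            /* same shape as ipmr_mfc_seq_idx() after the conversion */
            for (ct = 0; ct < MFC_LINES; ct++)
                    list_for_each_entry(mfc, &mfc_cache_array[ct], list)
                            printf("bucket %d: origin %u\n", ct, mfc->origin);
            return 0;
    }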
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index ab828400ed71..a992dc826f1c 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -88,7 +88,7 @@ clusterip_config_entry_put(struct clusterip_config *c)
88 list_del(&c->list); 88 list_del(&c->list);
89 write_unlock_bh(&clusterip_lock); 89 write_unlock_bh(&clusterip_lock);
90 90
91 dev_mc_delete(c->dev, c->clustermac, ETH_ALEN, 0); 91 dev_mc_del(c->dev, c->clustermac);
92 dev_put(c->dev); 92 dev_put(c->dev);
93 93
94 /* In case anyone still accesses the file, the open/close 94 /* In case anyone still accesses the file, the open/close
@@ -397,7 +397,7 @@ static bool clusterip_tg_check(const struct xt_tgchk_param *par)
397 dev_put(dev); 397 dev_put(dev);
398 return false; 398 return false;
399 } 399 }
400 dev_mc_add(config->dev,config->clustermac, ETH_ALEN, 0); 400 dev_mc_add(config->dev, config->clustermac);
401 } 401 }
402 } 402 }
403 cipinfo->config = config; 403 cipinfo->config = config;
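The CLUSTERIP hunks are a mechanical follow-up to the net-core multicast list rework: dev_mc_add() and dev_mc_del() dropped their explicit length and glbl arguments because the address length now comes from dev->addr_len. A tiny userspace stand-in showing just the call-shape change (the stub below is illustrative, not the real implementation):

    #include <stdio.h>

    #define ETH_ALEN 6
    struct net_device { int addr_len; };

    /* new-style signature: length is implied by dev->addr_len */
    static int dev_mc_add(struct net_device *dev, const unsigned char *addr)
    {
            printf("add %d-byte multicast addr\n", dev->addr_len);
            return 0;
    }

    int main(void)
    {
            struct net_device dev = { .addr_len = ETH_ALEN };
            unsigned char clustermac[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 1 };

            /* was: dev_mc_add(&dev, clustermac, ETH_ALEN, 0) */
            return dev_mc_add(&dev, clustermac);
    }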
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4f1f337f4337..3dc9914c1dce 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -251,6 +251,7 @@ static const struct snmp_mib snmp4_net_list[] = {
251 SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK), 251 SNMP_MIB_ITEM("TCPSackShiftFallback", LINUX_MIB_SACKSHIFTFALLBACK),
252 SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP), 252 SNMP_MIB_ITEM("TCPBacklogDrop", LINUX_MIB_TCPBACKLOGDROP),
253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP), 253 SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
254 SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
254 SNMP_MIB_SENTINEL 255 SNMP_MIB_SENTINEL
255}; 256};
256 257
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index cb562fdd9b9a..a947428ef0ae 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -258,10 +258,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
258 (__raw_get_cpu_var(rt_cache_stat).field++) 258 (__raw_get_cpu_var(rt_cache_stat).field++)
259 259
260static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx, 260static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
261 int genid) 261 int genid)
262{ 262{
263 return jhash_3words((__force u32)(__be32)(daddr), 263 return jhash_3words((__force u32)daddr, (__force u32)saddr,
264 (__force u32)(__be32)(saddr),
265 idx, genid) 264 idx, genid)
266 & rt_hash_mask; 265 & rt_hash_mask;
267} 266}
@@ -378,12 +377,13 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
378 struct rtable *r = v; 377 struct rtable *r = v;
379 int len; 378 int len;
380 379
381 seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t" 380 seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
382 "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n", 381 "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
383 r->u.dst.dev ? r->u.dst.dev->name : "*", 382 r->u.dst.dev ? r->u.dst.dev->name : "*",
384 (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway, 383 (__force u32)r->rt_dst,
384 (__force u32)r->rt_gateway,
385 r->rt_flags, atomic_read(&r->u.dst.__refcnt), 385 r->rt_flags, atomic_read(&r->u.dst.__refcnt),
386 r->u.dst.__use, 0, (unsigned long)r->rt_src, 386 r->u.dst.__use, 0, (__force u32)r->rt_src,
387 (dst_metric(&r->u.dst, RTAX_ADVMSS) ? 387 (dst_metric(&r->u.dst, RTAX_ADVMSS) ?
388 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0), 388 (int)dst_metric(&r->u.dst, RTAX_ADVMSS) + 40 : 0),
389 dst_metric(&r->u.dst, RTAX_WINDOW), 389 dst_metric(&r->u.dst, RTAX_WINDOW),
@@ -685,18 +685,17 @@ static inline bool rt_caching(const struct net *net)
685static inline bool compare_hash_inputs(const struct flowi *fl1, 685static inline bool compare_hash_inputs(const struct flowi *fl1,
686 const struct flowi *fl2) 686 const struct flowi *fl2)
687{ 687{
688 return (__force u32)(((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | 688 return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
689 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr) | 689 ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
690 (fl1->iif ^ fl2->iif)) == 0); 690 (fl1->iif ^ fl2->iif)) == 0);
691} 691}
692 692
693static inline int compare_keys(struct flowi *fl1, struct flowi *fl2) 693static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
694{ 694{
695 return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) | 695 return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
696 (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) | 696 ((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
697 (fl1->mark ^ fl2->mark) | 697 (fl1->mark ^ fl2->mark) |
698 (*(u16 *)&fl1->nl_u.ip4_u.tos ^ 698 (*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) |
699 *(u16 *)&fl2->nl_u.ip4_u.tos) |
700 (fl1->oif ^ fl2->oif) | 699 (fl1->oif ^ fl2->oif) |
701 (fl1->iif ^ fl2->iif)) == 0; 700 (fl1->iif ^ fl2->iif)) == 0;
702} 701}
@@ -2319,8 +2318,8 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2319 rcu_read_lock(); 2318 rcu_read_lock();
2320 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2319 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
2321 rth = rcu_dereference(rth->u.dst.rt_next)) { 2320 rth = rcu_dereference(rth->u.dst.rt_next)) {
2322 if (((rth->fl.fl4_dst ^ daddr) | 2321 if ((((__force u32)rth->fl.fl4_dst ^ (__force u32)daddr) |
2323 (rth->fl.fl4_src ^ saddr) | 2322 ((__force u32)rth->fl.fl4_src ^ (__force u32)saddr) |
2324 (rth->fl.iif ^ iif) | 2323 (rth->fl.iif ^ iif) |
2325 rth->fl.oif | 2324 rth->fl.oif |
2326 (rth->fl.fl4_tos ^ tos)) == 0 && 2325 (rth->fl.fl4_tos ^ tos)) == 0 &&
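Most of the route.c churn is endianness-annotation hygiene rather than behavior change: __be32 values that are only XOR-compared or hashed are now cast through (__force u32) per operand, which silences sparse without altering the generated code. A self-contained sketch of the idiom; the __CHECKER__ block mimics how the kernel defines these annotations, and with a normal compiler they vanish:

    #include <stdio.h>

    #ifdef __CHECKER__
    #define __force         __attribute__((force))
    #define __bitwise       __attribute__((bitwise))
    #else
    #define __force
    #define __bitwise
    #endif

    typedef unsigned int u32;
    typedef u32 __bitwise __be32;

    static int same_flow(__be32 d1, __be32 d2, __be32 s1, __be32 s2)
    {
            /* byte order is irrelevant to an equality test, so force
             * the values to plain u32 before XOR-ing them together */
            return (((__force u32)d1 ^ (__force u32)d2) |
                    ((__force u32)s1 ^ (__force u32)s2)) == 0;
    }

    int main(void)
    {
            __be32 a = (__force __be32)0x0100007f; /* example value */

            printf("%d\n", same_flow(a, a, a, a)); /* prints 1 */
            return 0;
    }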
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0f8caf64caa3..6689c61cab47 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -378,7 +378,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
378 struct sock *sk = sock->sk; 378 struct sock *sk = sock->sk;
379 struct tcp_sock *tp = tcp_sk(sk); 379 struct tcp_sock *tp = tcp_sk(sk);
380 380
381 sock_poll_wait(file, sk->sk_sleep, wait); 381 sock_poll_wait(file, sk_sleep(sk), wait);
382 if (sk->sk_state == TCP_LISTEN) 382 if (sk->sk_state == TCP_LISTEN)
383 return inet_csk_listen_poll(sk); 383 return inet_csk_listen_poll(sk);
384 384
@@ -2721,7 +2721,7 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2721 struct tcphdr *th2; 2721 struct tcphdr *th2;
2722 unsigned int len; 2722 unsigned int len;
2723 unsigned int thlen; 2723 unsigned int thlen;
2724 unsigned int flags; 2724 __be32 flags;
2725 unsigned int mss = 1; 2725 unsigned int mss = 1;
2726 unsigned int hlen; 2726 unsigned int hlen;
2727 unsigned int off; 2727 unsigned int off;
@@ -2771,10 +2771,10 @@ struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2771 2771
2772found: 2772found:
2773 flush = NAPI_GRO_CB(p)->flush; 2773 flush = NAPI_GRO_CB(p)->flush;
2774 flush |= flags & TCP_FLAG_CWR; 2774 flush |= (__force int)(flags & TCP_FLAG_CWR);
2775 flush |= (flags ^ tcp_flag_word(th2)) & 2775 flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
2776 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH); 2776 ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
2777 flush |= th->ack_seq ^ th2->ack_seq; 2777 flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
2778 for (i = sizeof(*th); i < thlen; i += 4) 2778 for (i = sizeof(*th); i < thlen; i += 4)
2779 flush |= *(u32 *)((u8 *)th + i) ^ 2779 flush |= *(u32 *)((u8 *)th + i) ^
2780 *(u32 *)((u8 *)th2 + i); 2780 *(u32 *)((u8 *)th2 + i);
@@ -2795,8 +2795,9 @@ found:
2795 2795
2796out_check_final: 2796out_check_final:
2797 flush = len < mss; 2797 flush = len < mss;
2798 flush |= flags & (TCP_FLAG_URG | TCP_FLAG_PSH | TCP_FLAG_RST | 2798 flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
2799 TCP_FLAG_SYN | TCP_FLAG_FIN); 2799 TCP_FLAG_RST | TCP_FLAG_SYN |
2800 TCP_FLAG_FIN));
2800 2801
2801 if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) 2802 if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
2802 pp = head; 2803 pp = head;
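In tcp_gro_receive() the flags word is now typed __be32, since it is read straight out of the header via tcp_flag_word(), so every accumulation into the int flush mask gains a (__force int) cast. The underlying trick, XOR the two headers and OR all differences into a single flush decision, is shown below in plain C; the flag constants match the kernel's pre-htonl() values, but the packet words are invented for the demo:

    /* Runnable sketch of the "accumulate differences, flush if nonzero"
     * pattern tcp_gro_receive() uses. */
    #include <stdint.h>
    #include <stdio.h>

    #define TCP_FLAG_CWR 0x00800000u
    #define TCP_FLAG_PSH 0x00080000u
    #define TCP_FLAG_FIN 0x00010000u

    int main(void)
    {
            uint32_t flags = 0x00180000u; /* pretend: ACK|PSH in new skb */
            uint32_t word2 = 0x00100000u; /* pretend: ACK in held skb */
            int flush = 0;

            flush |= (int)(flags & TCP_FLAG_CWR);
            /* any flag difference outside CWR/FIN/PSH forces a flush;
             * here only PSH differs, so the segments still coalesce */
            flush |= (int)((flags ^ word2) &
                           ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
            printf("flush=%d\n", flush != 0); /* prints 0 */
            return 0;
    }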
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f240f57b2199..ae3ec15fb630 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3710,7 +3710,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
3710 } 3710 }
3711 3711
3712 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) 3712 if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
3713 dst_confirm(sk->sk_dst_cache); 3713 dst_confirm(__sk_dst_get(sk));
3714 3714
3715 return 1; 3715 return 1;
3716 3716
@@ -4319,7 +4319,7 @@ static void tcp_ofo_queue(struct sock *sk)
4319 } 4319 }
4320 4320
4321 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { 4321 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
4322 SOCK_DEBUG(sk, "ofo packet was already received \n"); 4322 SOCK_DEBUG(sk, "ofo packet was already received\n");
4323 __skb_unlink(skb, &tp->out_of_order_queue); 4323 __skb_unlink(skb, &tp->out_of_order_queue);
4324 __kfree_skb(skb); 4324 __kfree_skb(skb);
4325 continue; 4325 continue;
@@ -5833,7 +5833,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5833 if (tp->snd_una == tp->write_seq) { 5833 if (tp->snd_una == tp->write_seq) {
5834 tcp_set_state(sk, TCP_FIN_WAIT2); 5834 tcp_set_state(sk, TCP_FIN_WAIT2);
5835 sk->sk_shutdown |= SEND_SHUTDOWN; 5835 sk->sk_shutdown |= SEND_SHUTDOWN;
5836 dst_confirm(sk->sk_dst_cache); 5836 dst_confirm(__sk_dst_get(sk));
5837 5837
5838 if (!sock_flag(sk, SOCK_DEAD)) 5838 if (!sock_flag(sk, SOCK_DEAD))
5839 /* Wake up lingering close() */ 5839 /* Wake up lingering close() */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3c23e70885f4..4d6717d1e61c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -519,26 +519,31 @@ out:
519 sock_put(sk); 519 sock_put(sk);
520} 520}
521 521
522/* This routine computes an IPv4 TCP checksum. */ 522static void __tcp_v4_send_check(struct sk_buff *skb,
523void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) 523 __be32 saddr, __be32 daddr)
524{ 524{
525 struct inet_sock *inet = inet_sk(sk);
526 struct tcphdr *th = tcp_hdr(skb); 525 struct tcphdr *th = tcp_hdr(skb);
527 526
528 if (skb->ip_summed == CHECKSUM_PARTIAL) { 527 if (skb->ip_summed == CHECKSUM_PARTIAL) {
529 th->check = ~tcp_v4_check(len, inet->inet_saddr, 528 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
530 inet->inet_daddr, 0);
531 skb->csum_start = skb_transport_header(skb) - skb->head; 529 skb->csum_start = skb_transport_header(skb) - skb->head;
532 skb->csum_offset = offsetof(struct tcphdr, check); 530 skb->csum_offset = offsetof(struct tcphdr, check);
533 } else { 531 } else {
534 th->check = tcp_v4_check(len, inet->inet_saddr, 532 th->check = tcp_v4_check(skb->len, saddr, daddr,
535 inet->inet_daddr,
536 csum_partial(th, 533 csum_partial(th,
537 th->doff << 2, 534 th->doff << 2,
538 skb->csum)); 535 skb->csum));
539 } 536 }
540} 537}
541 538
539/* This routine computes an IPv4 TCP checksum. */
540void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
541{
542 struct inet_sock *inet = inet_sk(sk);
543
544 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
545}
546
542int tcp_v4_gso_send_check(struct sk_buff *skb) 547int tcp_v4_gso_send_check(struct sk_buff *skb)
543{ 548{
544 const struct iphdr *iph; 549 const struct iphdr *iph;
@@ -551,10 +556,8 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
551 th = tcp_hdr(skb); 556 th = tcp_hdr(skb);
552 557
553 th->check = 0; 558 th->check = 0;
554 th->check = ~tcp_v4_check(skb->len, iph->saddr, iph->daddr, 0);
555 skb->csum_start = skb_transport_header(skb) - skb->head;
556 skb->csum_offset = offsetof(struct tcphdr, check);
557 skb->ip_summed = CHECKSUM_PARTIAL; 559 skb->ip_summed = CHECKSUM_PARTIAL;
560 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
558 return 0; 561 return 0;
559} 562}
560 563
@@ -763,13 +766,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
763 skb = tcp_make_synack(sk, dst, req, rvp); 766 skb = tcp_make_synack(sk, dst, req, rvp);
764 767
765 if (skb) { 768 if (skb) {
766 struct tcphdr *th = tcp_hdr(skb); 769 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
767
768 th->check = tcp_v4_check(skb->len,
769 ireq->loc_addr,
770 ireq->rmt_addr,
771 csum_partial(th, skb->len,
772 skb->csum));
773 770
774 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, 771 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
775 ireq->rmt_addr, 772 ireq->rmt_addr,
@@ -1289,8 +1286,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1289 goto drop_and_release; 1286 goto drop_and_release;
1290 1287
1291 /* Secret recipe starts with IP addresses */ 1288 /* Secret recipe starts with IP addresses */
1292 *mess++ ^= daddr; 1289 *mess++ ^= (__force u32)daddr;
1293 *mess++ ^= saddr; 1290 *mess++ ^= (__force u32)saddr;
1294 1291
1295 /* plus variable length Initiator Cookie */ 1292 /* plus variable length Initiator Cookie */
1296 c = (u8 *)mess; 1293 c = (u8 *)mess;
@@ -1675,6 +1672,8 @@ process:
1675 1672
1676 skb->dev = NULL; 1673 skb->dev = NULL;
1677 1674
1675 inet_rps_save_rxhash(sk, skb->rxhash);
1676
1678 bh_lock_sock_nested(sk); 1677 bh_lock_sock_nested(sk);
1679 ret = 0; 1678 ret = 0;
1680 if (!sock_owned_by_user(sk)) { 1679 if (!sock_owned_by_user(sk)) {
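The tcp_ipv4.c hunks deduplicate three checksum sites (regular transmit, GSO, and SYN-ACK) into one __tcp_v4_send_check() keyed only on the skb and an explicit saddr/daddr pair; that is what lets the SYN-ACK path checksum with ireq->loc_addr/ireq->rmt_addr before any full socket exists. For reference, a standalone sketch of the TCP/IPv4 pseudo-header sum that tcp_v4_check() is built on, folding per RFC 1071; the addresses are examples:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint16_t csum(const void *data, size_t len, uint32_t sum)
    {
            const uint8_t *p = data;

            while (len > 1) {
                    sum += (uint32_t)p[0] << 8 | p[1];
                    p += 2;
                    len -= 2;
            }
            if (len)
                    sum += (uint32_t)p[0] << 8;
            while (sum >> 16)                    /* fold carries */
                    sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    int main(void)
    {
            /* pseudo-header: saddr, daddr, zero, protocol, tcp length */
            uint8_t ph[12] = {0};
            uint32_t saddr = inet_addr("192.0.2.1"); /* network order */
            uint32_t daddr = inet_addr("192.0.2.2");
            uint16_t tcplen = htons(20);

            memcpy(ph, &saddr, 4);
            memcpy(ph + 4, &daddr, 4);
            ph[9] = 6; /* IPPROTO_TCP */
            memcpy(ph + 10, &tcplen, 2);
            printf("pseudo-header sum: 0x%04x\n", csum(ph, sizeof(ph), 0));
            return 0;
    }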
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 5fabff9ac6d6..794c2e122a41 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -672,6 +672,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
672 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && 672 if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
673 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { 673 TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
674 inet_rsk(req)->acked = 1; 674 inet_rsk(req)->acked = 1;
675 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
675 return NULL; 676 return NULL;
676 } 677 }
677 678
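Together with the proc.c hunk earlier, this adds a visible drop counter: a bare ACK that would have completed the handshake but is swallowed by TCP_DEFER_ACCEPT now bumps TCPDeferAcceptDrop instead of vanishing silently. A small userspace reader for it, assuming the usual paired name/value line layout of /proc/net/netstat:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char names[4096], vals[4096];
            FILE *f = fopen("/proc/net/netstat", "r");

            if (!f)
                    return 1;
            /* the file is pairs of lines: "TcpExt: <names>" then values */
            while (fgets(names, sizeof(names), f) &&
                   fgets(vals, sizeof(vals), f)) {
                    char *np, *vp, *n, *v;

                    if (strncmp(names, "TcpExt:", 7))
                            continue;
                    n = strtok_r(names, " \n", &np);
                    v = strtok_r(vals, " \n", &vp);
                    while (n && v) {
                            if (!strcmp(n, "TCPDeferAcceptDrop"))
                                    printf("TCPDeferAcceptDrop = %s\n", v);
                            n = strtok_r(NULL, " \n", &np);
                            v = strtok_r(NULL, " \n", &vp);
                    }
            }
            fclose(f);
            return 0;
    }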
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 0dda86e72ad8..429ad9286efc 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -350,6 +350,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
350 */ 350 */
351static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) 351static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
352{ 352{
353 skb->ip_summed = CHECKSUM_PARTIAL;
353 skb->csum = 0; 354 skb->csum = 0;
354 355
355 TCP_SKB_CB(skb)->flags = flags; 356 TCP_SKB_CB(skb)->flags = flags;
@@ -860,7 +861,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
860 th->urg_ptr = htons(tp->snd_up - tcb->seq); 861 th->urg_ptr = htons(tp->snd_up - tcb->seq);
861 th->urg = 1; 862 th->urg = 1;
862 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { 863 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
863 th->urg_ptr = 0xFFFF; 864 th->urg_ptr = htons(0xFFFF);
864 th->urg = 1; 865 th->urg = 1;
865 } 866 }
866 } 867 }
@@ -878,7 +879,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
878 } 879 }
879#endif 880#endif
880 881
881 icsk->icsk_af_ops->send_check(sk, skb->len, skb); 882 icsk->icsk_af_ops->send_check(sk, skb);
882 883
883 if (likely(tcb->flags & TCPCB_FLAG_ACK)) 884 if (likely(tcb->flags & TCPCB_FLAG_ACK))
884 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); 885 tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
@@ -889,7 +890,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
889 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) 890 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq)
890 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); 891 TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
891 892
892 err = icsk->icsk_af_ops->queue_xmit(skb, 0); 893 err = icsk->icsk_af_ops->queue_xmit(skb);
893 if (likely(err <= 0)) 894 if (likely(err <= 0))
894 return err; 895 return err;
895 896
@@ -2484,7 +2485,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
2484 *tail-- ^= TCP_SKB_CB(skb)->seq + 1; 2485 *tail-- ^= TCP_SKB_CB(skb)->seq + 1;
2485 2486
2486 /* recommended */ 2487 /* recommended */
2487 *tail-- ^= ((th->dest << 16) | th->source); 2488 *tail-- ^= (((__force u32)th->dest << 16) | (__force u32)th->source);
2488 *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */ 2489 *tail-- ^= (u32)(unsigned long)cvp; /* per sockopt */
2489 2490
2490 sha_transform((__u32 *)&xvp->cookie_bakery[0], 2491 sha_transform((__u32 *)&xvp->cookie_bakery[0],
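The urg_ptr assignment above is a pure annotation fix: th->urg_ptr is __be16, so the constant needs htons(), but 0xFFFF happens to read the same in either byte order, so the wire format never changed. Trivial demonstration:

    #include <arpa/inet.h>
    #include <stdio.h>

    int main(void)
    {
            printf("htons(0xFFFF) = 0x%04x\n", htons(0xFFFF)); /* 0xffff */
            printf("htons(0x1234) = 0x%04x\n", htons(0x1234)); /* differs
                                                    on little-endian hosts */
            return 0;
    }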
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 8a0ab2977f1f..c732be00606b 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -172,14 +172,14 @@ static int tcp_write_timeout(struct sock *sk)
172 172
173 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) { 173 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
174 if (icsk->icsk_retransmits) 174 if (icsk->icsk_retransmits)
175 dst_negative_advice(&sk->sk_dst_cache, sk); 175 dst_negative_advice(sk);
176 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; 176 retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
177 } else { 177 } else {
178 if (retransmits_timed_out(sk, sysctl_tcp_retries1)) { 178 if (retransmits_timed_out(sk, sysctl_tcp_retries1)) {
179 /* Black hole detection */ 179 /* Black hole detection */
180 tcp_mtu_probing(icsk, sk); 180 tcp_mtu_probing(icsk, sk);
181 181
182 dst_negative_advice(&sk->sk_dst_cache, sk); 182 dst_negative_advice(sk);
183 } 183 }
184 184
185 retry_until = sysctl_tcp_retries2; 185 retry_until = sysctl_tcp_retries2;
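The tcp_input.c and tcp_timer.c hunks stop dereferencing sk->sk_dst_cache by hand in favor of the __sk_dst_get(sk) and dst_negative_advice(sk) accessors, so the field's storage and locking rules can change without touching every caller. A userspace miniature of that refactor; every definition here is a stand-in:

    #include <stdio.h>

    struct dst_entry { int id; };
    struct sock { struct dst_entry *sk_dst_cache; /* treat as private */ };

    static struct dst_entry *__sk_dst_get(struct sock *sk)
    {
            return sk->sk_dst_cache; /* the kernel adds RCU/lockdep
                                      * checks behind this accessor */
    }

    static void dst_confirm(struct dst_entry *dst)
    {
            if (dst)
                    printf("confirmed dst %d\n", dst->id);
    }

    int main(void)
    {
            struct dst_entry d = { 42 };
            struct sock sk = { &d };

            /* was: dst_confirm(sk->sk_dst_cache) */
            dst_confirm(__sk_dst_get(&sk));
            return 0;
    }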
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 8fef859db35d..1e18f9cc9247 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -307,13 +307,13 @@ static int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
307static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr, 307static unsigned int udp4_portaddr_hash(struct net *net, __be32 saddr,
308 unsigned int port) 308 unsigned int port)
309{ 309{
310 return jhash_1word(saddr, net_hash_mix(net)) ^ port; 310 return jhash_1word((__force u32)saddr, net_hash_mix(net)) ^ port;
311} 311}
312 312
313int udp_v4_get_port(struct sock *sk, unsigned short snum) 313int udp_v4_get_port(struct sock *sk, unsigned short snum)
314{ 314{
315 unsigned int hash2_nulladdr = 315 unsigned int hash2_nulladdr =
316 udp4_portaddr_hash(sock_net(sk), INADDR_ANY, snum); 316 udp4_portaddr_hash(sock_net(sk), htonl(INADDR_ANY), snum);
317 unsigned int hash2_partial = 317 unsigned int hash2_partial =
318 udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0); 318 udp4_portaddr_hash(sock_net(sk), inet_sk(sk)->inet_rcv_saddr, 0);
319 319
@@ -466,14 +466,14 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
466 daddr, hnum, dif, 466 daddr, hnum, dif,
467 hslot2, slot2); 467 hslot2, slot2);
468 if (!result) { 468 if (!result) {
469 hash2 = udp4_portaddr_hash(net, INADDR_ANY, hnum); 469 hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
470 slot2 = hash2 & udptable->mask; 470 slot2 = hash2 & udptable->mask;
471 hslot2 = &udptable->hash2[slot2]; 471 hslot2 = &udptable->hash2[slot2];
472 if (hslot->count < hslot2->count) 472 if (hslot->count < hslot2->count)
473 goto begin; 473 goto begin;
474 474
475 result = udp4_lib_lookup2(net, saddr, sport, 475 result = udp4_lib_lookup2(net, saddr, sport,
476 INADDR_ANY, hnum, dif, 476 htonl(INADDR_ANY), hnum, dif,
477 hslot2, slot2); 477 hslot2, slot2);
478 } 478 }
479 rcu_read_unlock(); 479 rcu_read_unlock();
@@ -1217,6 +1217,7 @@ int udp_disconnect(struct sock *sk, int flags)
1217 sk->sk_state = TCP_CLOSE; 1217 sk->sk_state = TCP_CLOSE;
1218 inet->inet_daddr = 0; 1218 inet->inet_daddr = 0;
1219 inet->inet_dport = 0; 1219 inet->inet_dport = 0;
1220 inet_rps_save_rxhash(sk, 0);
1220 sk->sk_bound_dev_if = 0; 1221 sk->sk_bound_dev_if = 0;
1221 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) 1222 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1222 inet_reset_saddr(sk); 1223 inet_reset_saddr(sk);
@@ -1258,8 +1259,12 @@ EXPORT_SYMBOL(udp_lib_unhash);
1258 1259
1259static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 1260static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1260{ 1261{
1261 int rc = sock_queue_rcv_skb(sk, skb); 1262 int rc;
1263
1264 if (inet_sk(sk)->inet_daddr)
1265 inet_rps_save_rxhash(sk, skb->rxhash);
1262 1266
1267 rc = sock_queue_rcv_skb(sk, skb);
1263 if (rc < 0) { 1268 if (rc < 0) {
1264 int is_udplite = IS_UDPLITE(sk); 1269 int is_udplite = IS_UDPLITE(sk);
1265 1270
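The udp.c additions feed Receive Packet Steering: once a UDP socket has a remote address, the hardware rxhash of incoming packets is remembered on the socket via inet_rps_save_rxhash(), and udp_disconnect() clears it again. A toy model of what the saved hash is for; the helper body below is a guess at the shape, not the kernel code:

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4

    struct sock { uint32_t rxhash; };

    static void inet_rps_save_rxhash_(struct sock *sk, uint32_t rxhash)
    {
            if (sk->rxhash != rxhash) /* avoid dirtying the cache line */
                    sk->rxhash = rxhash;
    }

    int main(void)
    {
            struct sock sk = { 0 };

            inet_rps_save_rxhash_(&sk, 0xdeadbeef); /* connected receive */
            printf("steer flow to cpu %u\n", sk.rxhash % NR_CPUS);
            inet_rps_save_rxhash_(&sk, 0);          /* udp_disconnect() */
            return 0;
    }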
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index e4a1483fba77..1705476670ef 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -59,27 +59,6 @@ static int xfrm4_get_saddr(struct net *net,
59 return 0; 59 return 0;
60} 60}
61 61
62static struct dst_entry *
63__xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
64{
65 struct dst_entry *dst;
66
67 read_lock_bh(&policy->lock);
68 for (dst = policy->bundles; dst; dst = dst->next) {
69 struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
70 if (xdst->u.rt.fl.oif == fl->oif && /*XXX*/
71 xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
72 xdst->u.rt.fl.fl4_src == fl->fl4_src &&
73 xdst->u.rt.fl.fl4_tos == fl->fl4_tos &&
74 xfrm_bundle_ok(policy, xdst, fl, AF_INET, 0)) {
75 dst_clone(dst);
76 break;
77 }
78 }
79 read_unlock_bh(&policy->lock);
80 return dst;
81}
82
83static int xfrm4_get_tos(struct flowi *fl) 62static int xfrm4_get_tos(struct flowi *fl)
84{ 63{
85 return fl->fl4_tos; 64 return fl->fl4_tos;
@@ -259,7 +238,6 @@ static struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
259 .dst_ops = &xfrm4_dst_ops, 238 .dst_ops = &xfrm4_dst_ops,
260 .dst_lookup = xfrm4_dst_lookup, 239 .dst_lookup = xfrm4_dst_lookup,
261 .get_saddr = xfrm4_get_saddr, 240 .get_saddr = xfrm4_get_saddr,
262 .find_bundle = __xfrm4_find_bundle,
263 .decode_session = _decode_session4, 241 .decode_session = _decode_session4,
264 .get_tos = xfrm4_get_tos, 242 .get_tos = xfrm4_get_tos,
265 .init_path = xfrm4_init_path, 243 .init_path = xfrm4_init_path,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 413054f02aab..34d2d649e396 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -82,7 +82,7 @@
82#include <linux/random.h> 82#include <linux/random.h>
83#endif 83#endif
84 84
85#include <asm/uaccess.h> 85#include <linux/uaccess.h>
86#include <asm/unaligned.h> 86#include <asm/unaligned.h>
87 87
88#include <linux/proc_fs.h> 88#include <linux/proc_fs.h>
@@ -98,7 +98,11 @@
98#endif 98#endif
99 99
100#define INFINITY_LIFE_TIME 0xFFFFFFFF 100#define INFINITY_LIFE_TIME 0xFFFFFFFF
101#define TIME_DELTA(a,b) ((unsigned long)((long)(a) - (long)(b))) 101#define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b)))
102
103#define ADDRCONF_TIMER_FUZZ_MINUS (HZ > 50 ? HZ/50 : 1)
104#define ADDRCONF_TIMER_FUZZ (HZ / 4)
105#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
102 106
103#ifdef CONFIG_SYSCTL 107#ifdef CONFIG_SYSCTL
104static void addrconf_sysctl_register(struct inet6_dev *idev); 108static void addrconf_sysctl_register(struct inet6_dev *idev);
@@ -127,8 +131,8 @@ static int ipv6_count_addresses(struct inet6_dev *idev);
127/* 131/*
128 * Configured unicast address hash table 132 * Configured unicast address hash table
129 */ 133 */
130static struct inet6_ifaddr *inet6_addr_lst[IN6_ADDR_HSIZE]; 134static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
131static DEFINE_RWLOCK(addrconf_hash_lock); 135static DEFINE_SPINLOCK(addrconf_hash_lock);
132 136
133static void addrconf_verify(unsigned long); 137static void addrconf_verify(unsigned long);
134 138
@@ -138,8 +142,8 @@ static DEFINE_SPINLOCK(addrconf_verify_lock);
138static void addrconf_join_anycast(struct inet6_ifaddr *ifp); 142static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
139static void addrconf_leave_anycast(struct inet6_ifaddr *ifp); 143static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
140 144
141static void addrconf_bonding_change(struct net_device *dev, 145static void addrconf_type_change(struct net_device *dev,
142 unsigned long event); 146 unsigned long event);
143static int addrconf_ifdown(struct net_device *dev, int how); 147static int addrconf_ifdown(struct net_device *dev, int how);
144 148
145static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags); 149static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
@@ -152,8 +156,8 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
152 156
153static void inet6_prefix_notify(int event, struct inet6_dev *idev, 157static void inet6_prefix_notify(int event, struct inet6_dev *idev,
154 struct prefix_info *pinfo); 158 struct prefix_info *pinfo);
155static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, 159static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
156 struct net_device *dev); 160 struct net_device *dev);
157 161
158static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); 162static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
159 163
@@ -250,8 +254,7 @@ static void addrconf_del_timer(struct inet6_ifaddr *ifp)
250 __in6_ifa_put(ifp); 254 __in6_ifa_put(ifp);
251} 255}
252 256
253enum addrconf_timer_t 257enum addrconf_timer_t {
254{
255 AC_NONE, 258 AC_NONE,
256 AC_DAD, 259 AC_DAD,
257 AC_RS, 260 AC_RS,
@@ -271,7 +274,8 @@ static void addrconf_mod_timer(struct inet6_ifaddr *ifp,
271 case AC_RS: 274 case AC_RS:
272 ifp->timer.function = addrconf_rs_timer; 275 ifp->timer.function = addrconf_rs_timer;
273 break; 276 break;
274 default:; 277 default:
278 break;
275 } 279 }
276 ifp->timer.expires = jiffies + when; 280 ifp->timer.expires = jiffies + when;
277 add_timer(&ifp->timer); 281 add_timer(&ifp->timer);
@@ -318,7 +322,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
318{ 322{
319 struct net_device *dev = idev->dev; 323 struct net_device *dev = idev->dev;
320 324
321 WARN_ON(idev->addr_list != NULL); 325 WARN_ON(!list_empty(&idev->addr_list));
322 WARN_ON(idev->mc_list != NULL); 326 WARN_ON(idev->mc_list != NULL);
323 327
324#ifdef NET_REFCNT_DEBUG 328#ifdef NET_REFCNT_DEBUG
@@ -326,7 +330,7 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
326#endif 330#endif
327 dev_put(dev); 331 dev_put(dev);
328 if (!idev->dead) { 332 if (!idev->dead) {
329 printk("Freeing alive inet6 device %p\n", idev); 333 pr_warning("Freeing alive inet6 device %p\n", idev);
330 return; 334 return;
331 } 335 }
332 snmp6_free_dev(idev); 336 snmp6_free_dev(idev);
@@ -351,6 +355,8 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
351 355
352 rwlock_init(&ndev->lock); 356 rwlock_init(&ndev->lock);
353 ndev->dev = dev; 357 ndev->dev = dev;
358 INIT_LIST_HEAD(&ndev->addr_list);
359
354 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf)); 360 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
355 ndev->cnf.mtu6 = dev->mtu; 361 ndev->cnf.mtu6 = dev->mtu;
356 ndev->cnf.sysctl = NULL; 362 ndev->cnf.sysctl = NULL;
@@ -402,6 +408,7 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
402#endif 408#endif
403 409
404#ifdef CONFIG_IPV6_PRIVACY 410#ifdef CONFIG_IPV6_PRIVACY
411 INIT_LIST_HEAD(&ndev->tempaddr_list);
405 setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev); 412 setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
406 if ((dev->flags&IFF_LOOPBACK) || 413 if ((dev->flags&IFF_LOOPBACK) ||
407 dev->type == ARPHRD_TUNNEL || 414 dev->type == ARPHRD_TUNNEL ||
@@ -439,8 +446,10 @@ static struct inet6_dev * ipv6_find_idev(struct net_device *dev)
439 446
440 ASSERT_RTNL(); 447 ASSERT_RTNL();
441 448
442 if ((idev = __in6_dev_get(dev)) == NULL) { 449 idev = __in6_dev_get(dev);
443 if ((idev = ipv6_add_dev(dev)) == NULL) 450 if (!idev) {
451 idev = ipv6_add_dev(dev);
452 if (!idev)
444 return NULL; 453 return NULL;
445 } 454 }
446 455
@@ -466,7 +475,8 @@ static void dev_forward_change(struct inet6_dev *idev)
466 else 475 else
467 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters); 476 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
468 } 477 }
469 for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) { 478
479 list_for_each_entry(ifa, &idev->addr_list, if_list) {
470 if (ifa->flags&IFA_F_TENTATIVE) 480 if (ifa->flags&IFA_F_TENTATIVE)
471 continue; 481 continue;
472 if (idev->cnf.forwarding) 482 if (idev->cnf.forwarding)
@@ -523,12 +533,16 @@ static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
523} 533}
524#endif 534#endif
525 535
526/* Nobody refers to this ifaddr, destroy it */ 536static void inet6_ifa_finish_destroy_rcu(struct rcu_head *head)
537{
538 struct inet6_ifaddr *ifp = container_of(head, struct inet6_ifaddr, rcu);
539 kfree(ifp);
540}
527 541
542/* Nobody refers to this ifaddr, destroy it */
528void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp) 543void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
529{ 544{
530 WARN_ON(ifp->if_next != NULL); 545 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
531 WARN_ON(ifp->lst_next != NULL);
532 546
533#ifdef NET_REFCNT_DEBUG 547#ifdef NET_REFCNT_DEBUG
534 printk(KERN_DEBUG "inet6_ifa_finish_destroy\n"); 548 printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
@@ -537,54 +551,46 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
537 in6_dev_put(ifp->idev); 551 in6_dev_put(ifp->idev);
538 552
539 if (del_timer(&ifp->timer)) 553 if (del_timer(&ifp->timer))
540 printk("Timer is still running, when freeing ifa=%p\n", ifp); 554 pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
541 555
542 if (!ifp->dead) { 556 if (!ifp->dead) {
543 printk("Freeing alive inet6 address %p\n", ifp); 557 pr_warning("Freeing alive inet6 address %p\n", ifp);
544 return; 558 return;
545 } 559 }
546 dst_release(&ifp->rt->u.dst); 560 dst_release(&ifp->rt->u.dst);
547 561
548 kfree(ifp); 562 call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu);
549} 563}
550 564
551static void 565static void
552ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp) 566ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
553{ 567{
554 struct inet6_ifaddr *ifa, **ifap; 568 struct list_head *p;
555 int ifp_scope = ipv6_addr_src_scope(&ifp->addr); 569 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
556 570
557 /* 571 /*
558 * Each device address list is sorted in order of scope - 572 * Each device address list is sorted in order of scope -
559 * global before linklocal. 573 * global before linklocal.
560 */ 574 */
561 for (ifap = &idev->addr_list; (ifa = *ifap) != NULL; 575 list_for_each(p, &idev->addr_list) {
562 ifap = &ifa->if_next) { 576 struct inet6_ifaddr *ifa
577 = list_entry(p, struct inet6_ifaddr, if_list);
563 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr)) 578 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
564 break; 579 break;
565 } 580 }
566 581
567 ifp->if_next = *ifap; 582 list_add_tail(&ifp->if_list, p);
568 *ifap = ifp;
569} 583}
570 584
571/* 585static u32 ipv6_addr_hash(const struct in6_addr *addr)
572 * Hash function taken from net_alias.c
573 */
574static u8 ipv6_addr_hash(const struct in6_addr *addr)
575{ 586{
576 __u32 word;
577
578 /* 587 /*
579 * We perform the hash function over the last 64 bits of the address 588 * We perform the hash function over the last 64 bits of the address
580 * This will include the IEEE address token on links that support it. 589 * This will include the IEEE address token on links that support it.
581 */ 590 */
582 591 return jhash_2words((__force u32)addr->s6_addr32[2],
583 word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]); 592 (__force u32)addr->s6_addr32[3], 0)
584 word ^= (word >> 16); 593 & (IN6_ADDR_HSIZE - 1);
585 word ^= (word >> 8);
586
587 return ((word ^ (word >> 4)) & 0x0f);
588} 594}
589 595
590/* On success it returns ifp with increased reference count */ 596/* On success it returns ifp with increased reference count */
@@ -595,7 +601,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
595{ 601{
596 struct inet6_ifaddr *ifa = NULL; 602 struct inet6_ifaddr *ifa = NULL;
597 struct rt6_info *rt; 603 struct rt6_info *rt;
598 int hash; 604 unsigned int hash;
599 int err = 0; 605 int err = 0;
600 int addr_type = ipv6_addr_type(addr); 606 int addr_type = ipv6_addr_type(addr);
601 607
@@ -616,7 +622,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
616 goto out2; 622 goto out2;
617 } 623 }
618 624
619 write_lock(&addrconf_hash_lock); 625 spin_lock(&addrconf_hash_lock);
620 626
621 /* Ignore adding duplicate addresses on an interface */ 627 /* Ignore adding duplicate addresses on an interface */
622 if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) { 628 if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
@@ -643,6 +649,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
643 649
644 spin_lock_init(&ifa->lock); 650 spin_lock_init(&ifa->lock);
645 init_timer(&ifa->timer); 651 init_timer(&ifa->timer);
652 INIT_HLIST_NODE(&ifa->addr_lst);
646 ifa->timer.data = (unsigned long) ifa; 653 ifa->timer.data = (unsigned long) ifa;
647 ifa->scope = scope; 654 ifa->scope = scope;
648 ifa->prefix_len = pfxlen; 655 ifa->prefix_len = pfxlen;
@@ -669,10 +676,8 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
669 /* Add to big hash table */ 676 /* Add to big hash table */
670 hash = ipv6_addr_hash(addr); 677 hash = ipv6_addr_hash(addr);
671 678
672 ifa->lst_next = inet6_addr_lst[hash]; 679 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
673 inet6_addr_lst[hash] = ifa; 680 spin_unlock(&addrconf_hash_lock);
674 in6_ifa_hold(ifa);
675 write_unlock(&addrconf_hash_lock);
676 681
677 write_lock(&idev->lock); 682 write_lock(&idev->lock);
678 /* Add to inet6_dev unicast addr list. */ 683 /* Add to inet6_dev unicast addr list. */
@@ -680,8 +685,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr, int pfxlen,
680 685
681#ifdef CONFIG_IPV6_PRIVACY 686#ifdef CONFIG_IPV6_PRIVACY
682 if (ifa->flags&IFA_F_TEMPORARY) { 687 if (ifa->flags&IFA_F_TEMPORARY) {
683 ifa->tmp_next = idev->tempaddr_list; 688 list_add(&ifa->tmp_list, &idev->tempaddr_list);
684 idev->tempaddr_list = ifa;
685 in6_ifa_hold(ifa); 689 in6_ifa_hold(ifa);
686 } 690 }
687#endif 691#endif
@@ -700,7 +704,7 @@ out2:
700 704
701 return ifa; 705 return ifa;
702out: 706out:
703 write_unlock(&addrconf_hash_lock); 707 spin_unlock(&addrconf_hash_lock);
704 goto out2; 708 goto out2;
705} 709}
706 710
@@ -708,7 +712,7 @@ out:
708 712
709static void ipv6_del_addr(struct inet6_ifaddr *ifp) 713static void ipv6_del_addr(struct inet6_ifaddr *ifp)
710{ 714{
711 struct inet6_ifaddr *ifa, **ifap; 715 struct inet6_ifaddr *ifa, *ifn;
712 struct inet6_dev *idev = ifp->idev; 716 struct inet6_dev *idev = ifp->idev;
713 int hash; 717 int hash;
714 int deleted = 0, onlink = 0; 718 int deleted = 0, onlink = 0;
@@ -718,42 +722,27 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
718 722
719 ifp->dead = 1; 723 ifp->dead = 1;
720 724
721 write_lock_bh(&addrconf_hash_lock); 725 spin_lock_bh(&addrconf_hash_lock);
722 for (ifap = &inet6_addr_lst[hash]; (ifa=*ifap) != NULL; 726 hlist_del_init_rcu(&ifp->addr_lst);
723 ifap = &ifa->lst_next) { 727 spin_unlock_bh(&addrconf_hash_lock);
724 if (ifa == ifp) {
725 *ifap = ifa->lst_next;
726 __in6_ifa_put(ifp);
727 ifa->lst_next = NULL;
728 break;
729 }
730 }
731 write_unlock_bh(&addrconf_hash_lock);
732 728
733 write_lock_bh(&idev->lock); 729 write_lock_bh(&idev->lock);
734#ifdef CONFIG_IPV6_PRIVACY 730#ifdef CONFIG_IPV6_PRIVACY
735 if (ifp->flags&IFA_F_TEMPORARY) { 731 if (ifp->flags&IFA_F_TEMPORARY) {
736 for (ifap = &idev->tempaddr_list; (ifa=*ifap) != NULL; 732 list_del(&ifp->tmp_list);
737 ifap = &ifa->tmp_next) { 733 if (ifp->ifpub) {
738 if (ifa == ifp) { 734 in6_ifa_put(ifp->ifpub);
739 *ifap = ifa->tmp_next; 735 ifp->ifpub = NULL;
740 if (ifp->ifpub) {
741 in6_ifa_put(ifp->ifpub);
742 ifp->ifpub = NULL;
743 }
744 __in6_ifa_put(ifp);
745 ifa->tmp_next = NULL;
746 break;
747 }
748 } 736 }
737 __in6_ifa_put(ifp);
749 } 738 }
750#endif 739#endif
751 740
752 for (ifap = &idev->addr_list; (ifa=*ifap) != NULL;) { 741 list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) {
753 if (ifa == ifp) { 742 if (ifa == ifp) {
754 *ifap = ifa->if_next; 743 list_del_init(&ifp->if_list);
755 __in6_ifa_put(ifp); 744 __in6_ifa_put(ifp);
756 ifa->if_next = NULL; 745
757 if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0) 746 if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0)
758 break; 747 break;
759 deleted = 1; 748 deleted = 1;
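ipv6_del_addr() above walks idev->addr_list with list_for_each_entry_safe() because it unlinks the current entry mid-walk; the _safe variant caches the next node before the loop body runs. The idiom re-created in userspace with hand-rolled macros, where only the traversal pattern is the point:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(p, T, m) ((T *)((char *)(p) - offsetof(T, m)))
    #define list_entry(p, T, m) container_of(p, T, m)
    #define list_for_each_entry_safe(pos, n, head, member)                \
            for (pos = list_entry((head)->next, typeof(*pos), member),    \
                 n = list_entry(pos->member.next, typeof(*pos), member);  \
                 &pos->member != (head);                                  \
                 pos = n, n = list_entry(n->member.next, typeof(*n), member))

    static void list_add_tail_(struct list_head *e, struct list_head *h)
    {
            e->prev = h->prev; e->next = h;
            h->prev->next = e; h->prev = e;
    }

    static void list_del_(struct list_head *e)
    {
            e->prev->next = e->next;
            e->next->prev = e->prev;
            e->next = e->prev = e; /* like list_del_init() */
    }

    struct ifaddr { int id; struct list_head if_list; };

    int main(void)
    {
            struct list_head head = { &head, &head };
            struct ifaddr a = { .id = 1 }, b = { .id = 2 }, *ifa, *ifn;

            list_add_tail_(&a.if_list, &head);
            list_add_tail_(&b.if_list, &head);

            list_for_each_entry_safe(ifa, ifn, &head, if_list) {
                    if (ifa->id == 1) {
                            /* safe: ifn was captured before the unlink */
                            list_del_(&ifa->if_list);
                            printf("unlinked %d\n", ifa->id);
                    }
            }
            return 0;
    }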
@@ -786,7 +775,6 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
786 } 775 }
787 } 776 }
788 } 777 }
789 ifap = &ifa->if_next;
790 } 778 }
791 write_unlock_bh(&idev->lock); 779 write_unlock_bh(&idev->lock);
792 780
@@ -1165,7 +1153,7 @@ int ipv6_dev_get_saddr(struct net *net, struct net_device *dst_dev,
1165 continue; 1153 continue;
1166 1154
1167 read_lock_bh(&idev->lock); 1155 read_lock_bh(&idev->lock);
1168 for (score->ifa = idev->addr_list; score->ifa; score->ifa = score->ifa->if_next) { 1156 list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
1169 int i; 1157 int i;
1170 1158
1171 /* 1159 /*
@@ -1243,7 +1231,6 @@ try_nextdev:
1243 in6_ifa_put(hiscore->ifa); 1231 in6_ifa_put(hiscore->ifa);
1244 return 0; 1232 return 0;
1245} 1233}
1246
1247EXPORT_SYMBOL(ipv6_dev_get_saddr); 1234EXPORT_SYMBOL(ipv6_dev_get_saddr);
1248 1235
1249int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, 1236int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
@@ -1253,12 +1240,14 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1253 int err = -EADDRNOTAVAIL; 1240 int err = -EADDRNOTAVAIL;
1254 1241
1255 rcu_read_lock(); 1242 rcu_read_lock();
1256 if ((idev = __in6_dev_get(dev)) != NULL) { 1243 idev = __in6_dev_get(dev);
1244 if (idev) {
1257 struct inet6_ifaddr *ifp; 1245 struct inet6_ifaddr *ifp;
1258 1246
1259 read_lock_bh(&idev->lock); 1247 read_lock_bh(&idev->lock);
1260 for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { 1248 list_for_each_entry(ifp, &idev->addr_list, if_list) {
1261 if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) { 1249 if (ifp->scope == IFA_LINK &&
1250 !(ifp->flags & banned_flags)) {
1262 ipv6_addr_copy(addr, &ifp->addr); 1251 ipv6_addr_copy(addr, &ifp->addr);
1263 err = 0; 1252 err = 0;
1264 break; 1253 break;
@@ -1276,7 +1265,7 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
1276 struct inet6_ifaddr *ifp; 1265 struct inet6_ifaddr *ifp;
1277 1266
1278 read_lock_bh(&idev->lock); 1267 read_lock_bh(&idev->lock);
1279 for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) 1268 list_for_each_entry(ifp, &idev->addr_list, if_list)
1280 cnt++; 1269 cnt++;
1281 read_unlock_bh(&idev->lock); 1270 read_unlock_bh(&idev->lock);
1282 return cnt; 1271 return cnt;
@@ -1285,11 +1274,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev)
1285int ipv6_chk_addr(struct net *net, struct in6_addr *addr, 1274int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
1286 struct net_device *dev, int strict) 1275 struct net_device *dev, int strict)
1287{ 1276{
1288 struct inet6_ifaddr * ifp; 1277 struct inet6_ifaddr *ifp = NULL;
1289 u8 hash = ipv6_addr_hash(addr); 1278 struct hlist_node *node;
1279 unsigned int hash = ipv6_addr_hash(addr);
1290 1280
1291 read_lock_bh(&addrconf_hash_lock); 1281 rcu_read_lock_bh();
1292 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1282 hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
1293 if (!net_eq(dev_net(ifp->idev->dev), net)) 1283 if (!net_eq(dev_net(ifp->idev->dev), net))
1294 continue; 1284 continue;
1295 if (ipv6_addr_equal(&ifp->addr, addr) && 1285 if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -1299,27 +1289,28 @@ int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
1299 break; 1289 break;
1300 } 1290 }
1301 } 1291 }
1302 read_unlock_bh(&addrconf_hash_lock); 1292 rcu_read_unlock_bh();
1293
1303 return ifp != NULL; 1294 return ifp != NULL;
1304} 1295}
1305EXPORT_SYMBOL(ipv6_chk_addr); 1296EXPORT_SYMBOL(ipv6_chk_addr);
1306 1297
1307static 1298static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1308int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr, 1299 struct net_device *dev)
1309 struct net_device *dev)
1310{ 1300{
1311 struct inet6_ifaddr * ifp; 1301 unsigned int hash = ipv6_addr_hash(addr);
1312 u8 hash = ipv6_addr_hash(addr); 1302 struct inet6_ifaddr *ifp;
1303 struct hlist_node *node;
1313 1304
1314 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1305 hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) {
1315 if (!net_eq(dev_net(ifp->idev->dev), net)) 1306 if (!net_eq(dev_net(ifp->idev->dev), net))
1316 continue; 1307 continue;
1317 if (ipv6_addr_equal(&ifp->addr, addr)) { 1308 if (ipv6_addr_equal(&ifp->addr, addr)) {
1318 if (dev == NULL || ifp->idev->dev == dev) 1309 if (dev == NULL || ifp->idev->dev == dev)
1319 break; 1310 return true;
1320 } 1311 }
1321 } 1312 }
1322 return ifp != NULL; 1313 return false;
1323} 1314}
1324 1315
1325int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev) 1316int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
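Net effect of the two lookup hunks above: readers of inet6_addr_lst no longer take addrconf_hash_lock at all, while writers keep a spinlock and readers rely on RCU; ipv6_chk_same_addr() keeps the plain iterator because it only runs with the writer's lock already held. A condensed kernel-context sketch of that split (not a standalone program; it restates the locking shape the diff establishes):

    /* writer side, as in ipv6_add_addr() / ipv6_del_addr() */
    spin_lock_bh(&addrconf_hash_lock);
    hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
    spin_unlock_bh(&addrconf_hash_lock);

    /* reader side, as in ipv6_chk_addr(); this kernel generation's
     * hlist_for_each_entry_rcu() still takes a struct hlist_node * */
    rcu_read_lock_bh();
    hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
            if (ipv6_addr_equal(&ifp->addr, addr))
                    break;
    }
    rcu_read_unlock_bh();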
@@ -1333,7 +1324,7 @@ int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
1333 idev = __in6_dev_get(dev); 1324 idev = __in6_dev_get(dev);
1334 if (idev) { 1325 if (idev) {
1335 read_lock_bh(&idev->lock); 1326 read_lock_bh(&idev->lock);
1336 for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) { 1327 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1337 onlink = ipv6_prefix_equal(addr, &ifa->addr, 1328 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1338 ifa->prefix_len); 1329 ifa->prefix_len);
1339 if (onlink) 1330 if (onlink)
@@ -1350,24 +1341,26 @@ EXPORT_SYMBOL(ipv6_chk_prefix);
1350struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr, 1341struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1351 struct net_device *dev, int strict) 1342 struct net_device *dev, int strict)
1352{ 1343{
1353 struct inet6_ifaddr * ifp; 1344 struct inet6_ifaddr *ifp, *result = NULL;
1354 u8 hash = ipv6_addr_hash(addr); 1345 unsigned int hash = ipv6_addr_hash(addr);
1346 struct hlist_node *node;
1355 1347
1356 read_lock_bh(&addrconf_hash_lock); 1348 rcu_read_lock_bh();
1357 for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) { 1349 hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
1358 if (!net_eq(dev_net(ifp->idev->dev), net)) 1350 if (!net_eq(dev_net(ifp->idev->dev), net))
1359 continue; 1351 continue;
1360 if (ipv6_addr_equal(&ifp->addr, addr)) { 1352 if (ipv6_addr_equal(&ifp->addr, addr)) {
1361 if (dev == NULL || ifp->idev->dev == dev || 1353 if (dev == NULL || ifp->idev->dev == dev ||
1362 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) { 1354 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
1355 result = ifp;
1363 in6_ifa_hold(ifp); 1356 in6_ifa_hold(ifp);
1364 break; 1357 break;
1365 } 1358 }
1366 } 1359 }
1367 } 1360 }
1368 read_unlock_bh(&addrconf_hash_lock); 1361 rcu_read_unlock_bh();
1369 1362
1370 return ifp; 1363 return result;
1371} 1364}
1372 1365
1373/* Gets referenced address, destroys ifaddr */ 1366/* Gets referenced address, destroys ifaddr */
@@ -1570,7 +1563,7 @@ static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
1570 struct inet6_ifaddr *ifp; 1563 struct inet6_ifaddr *ifp;
1571 1564
1572 read_lock_bh(&idev->lock); 1565 read_lock_bh(&idev->lock);
1573 for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) { 1566 list_for_each_entry(ifp, &idev->addr_list, if_list) {
1574 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) { 1567 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
1575 memcpy(eui, ifp->addr.s6_addr+8, 8); 1568 memcpy(eui, ifp->addr.s6_addr+8, 8);
1576 err = 0; 1569 err = 0;
@@ -1738,7 +1731,8 @@ static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
1738 1731
1739 ASSERT_RTNL(); 1732 ASSERT_RTNL();
1740 1733
1741 if ((idev = ipv6_find_idev(dev)) == NULL) 1734 idev = ipv6_find_idev(dev);
1735 if (!idev)
1742 return NULL; 1736 return NULL;
1743 1737
1744 /* Add default multicast route */ 1738 /* Add default multicast route */
@@ -1971,7 +1965,7 @@ ok:
1971#ifdef CONFIG_IPV6_PRIVACY 1965#ifdef CONFIG_IPV6_PRIVACY
1972 read_lock_bh(&in6_dev->lock); 1966 read_lock_bh(&in6_dev->lock);
1973 /* update all temporary addresses in the list */ 1967 /* update all temporary addresses in the list */
1974 for (ift=in6_dev->tempaddr_list; ift; ift=ift->tmp_next) { 1968 list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) {
1975 /* 1969 /*
1976 * When adjusting the lifetimes of an existing 1970 * When adjusting the lifetimes of an existing
1977 * temporary address, only lower the lifetimes. 1971 * temporary address, only lower the lifetimes.
@@ -2174,7 +2168,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
2174 return -ENXIO; 2168 return -ENXIO;
2175 2169
2176 read_lock_bh(&idev->lock); 2170 read_lock_bh(&idev->lock);
2177 for (ifp = idev->addr_list; ifp; ifp=ifp->if_next) { 2171 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2178 if (ifp->prefix_len == plen && 2172 if (ifp->prefix_len == plen &&
2179 ipv6_addr_equal(pfx, &ifp->addr)) { 2173 ipv6_addr_equal(pfx, &ifp->addr)) {
2180 in6_ifa_hold(ifp); 2174 in6_ifa_hold(ifp);
@@ -2185,7 +2179,7 @@ static int inet6_addr_del(struct net *net, int ifindex, struct in6_addr *pfx,
2185 /* If the last address is deleted administratively, 2179 /* If the last address is deleted administratively,
2186 disable IPv6 on this interface. 2180 disable IPv6 on this interface.
2187 */ 2181 */
2188 if (idev->addr_list == NULL) 2182 if (list_empty(&idev->addr_list))
2189 addrconf_ifdown(idev->dev, 1); 2183 addrconf_ifdown(idev->dev, 1);
2190 return 0; 2184 return 0;
2191 } 2185 }
@@ -2446,7 +2440,8 @@ static void addrconf_ip6_tnl_config(struct net_device *dev)
2446 2440
2447 ASSERT_RTNL(); 2441 ASSERT_RTNL();
2448 2442
2449 if ((idev = addrconf_add_dev(dev)) == NULL) { 2443 idev = addrconf_add_dev(dev);
2444 if (!idev) {
2450 printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n"); 2445 printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
2451 return; 2446 return;
2452 } 2447 }
@@ -2461,7 +2456,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2461 int run_pending = 0; 2456 int run_pending = 0;
2462 int err; 2457 int err;
2463 2458
2464 switch(event) { 2459 switch (event) {
2465 case NETDEV_REGISTER: 2460 case NETDEV_REGISTER:
2466 if (!idev && dev->mtu >= IPV6_MIN_MTU) { 2461 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
2467 idev = ipv6_add_dev(dev); 2462 idev = ipv6_add_dev(dev);
@@ -2469,6 +2464,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2469 return notifier_from_errno(-ENOMEM); 2464 return notifier_from_errno(-ENOMEM);
2470 } 2465 }
2471 break; 2466 break;
2467
2472 case NETDEV_UP: 2468 case NETDEV_UP:
2473 case NETDEV_CHANGE: 2469 case NETDEV_CHANGE:
2474 if (dev->flags & IFF_SLAVE) 2470 if (dev->flags & IFF_SLAVE)
@@ -2498,10 +2494,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2498 } 2494 }
2499 2495
2500 if (idev) { 2496 if (idev) {
2501 if (idev->if_flags & IF_READY) { 2497 if (idev->if_flags & IF_READY)
2502 /* device is already configured. */ 2498 /* device is already configured. */
2503 break; 2499 break;
2504 }
2505 idev->if_flags |= IF_READY; 2500 idev->if_flags |= IF_READY;
2506 } 2501 }
2507 2502
@@ -2513,7 +2508,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2513 run_pending = 1; 2508 run_pending = 1;
2514 } 2509 }
2515 2510
2516 switch(dev->type) { 2511 switch (dev->type) {
2517#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE) 2512#if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
2518 case ARPHRD_SIT: 2513 case ARPHRD_SIT:
2519 addrconf_sit_config(dev); 2514 addrconf_sit_config(dev);
@@ -2530,25 +2525,30 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2530 addrconf_dev_config(dev); 2525 addrconf_dev_config(dev);
2531 break; 2526 break;
2532 } 2527 }
2528
2533 if (idev) { 2529 if (idev) {
2534 if (run_pending) 2530 if (run_pending)
2535 addrconf_dad_run(idev); 2531 addrconf_dad_run(idev);
2536 2532
2537 /* If the MTU changed during the interface down, when the 2533 /*
2538 interface up, the changed MTU must be reflected in the 2534 * If the MTU changed during the interface down,
2539 idev as well as routers. 2535 * when the interface up, the changed MTU must be
2536 * reflected in the idev as well as routers.
2540 */ 2537 */
2541 if (idev->cnf.mtu6 != dev->mtu && dev->mtu >= IPV6_MIN_MTU) { 2538 if (idev->cnf.mtu6 != dev->mtu &&
2539 dev->mtu >= IPV6_MIN_MTU) {
2542 rt6_mtu_change(dev, dev->mtu); 2540 rt6_mtu_change(dev, dev->mtu);
2543 idev->cnf.mtu6 = dev->mtu; 2541 idev->cnf.mtu6 = dev->mtu;
2544 } 2542 }
2545 idev->tstamp = jiffies; 2543 idev->tstamp = jiffies;
2546 inet6_ifinfo_notify(RTM_NEWLINK, idev); 2544 inet6_ifinfo_notify(RTM_NEWLINK, idev);
2547 /* If the changed mtu during down is lower than IPV6_MIN_MTU 2545
2548 stop IPv6 on this interface. 2546 /*
2547 * If the changed mtu during down is lower than
2548 * IPV6_MIN_MTU stop IPv6 on this interface.
2549 */ 2549 */
2550 if (dev->mtu < IPV6_MIN_MTU) 2550 if (dev->mtu < IPV6_MIN_MTU)
2551 addrconf_ifdown(dev, event != NETDEV_DOWN); 2551 addrconf_ifdown(dev, 1);
2552 } 2552 }
2553 break; 2553 break;
2554 2554
@@ -2565,7 +2565,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2565 break; 2565 break;
2566 } 2566 }
2567 2567
2568 /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */ 2568 /*
2569 * MTU falled under IPV6_MIN_MTU.
2570 * Stop IPv6 on this interface.
2571 */
2569 2572
2570 case NETDEV_DOWN: 2573 case NETDEV_DOWN:
2571 case NETDEV_UNREGISTER: 2574 case NETDEV_UNREGISTER:
@@ -2585,9 +2588,10 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2585 return notifier_from_errno(err); 2588 return notifier_from_errno(err);
2586 } 2589 }
2587 break; 2590 break;
2588 case NETDEV_BONDING_OLDTYPE: 2591
2589 case NETDEV_BONDING_NEWTYPE: 2592 case NETDEV_PRE_TYPE_CHANGE:
2590 addrconf_bonding_change(dev, event); 2593 case NETDEV_POST_TYPE_CHANGE:
2594 addrconf_type_change(dev, event);
2591 break; 2595 break;
2592 } 2596 }
2593 2597
@@ -2599,28 +2603,27 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
2599 */ 2603 */
2600static struct notifier_block ipv6_dev_notf = { 2604static struct notifier_block ipv6_dev_notf = {
2601 .notifier_call = addrconf_notify, 2605 .notifier_call = addrconf_notify,
2602 .priority = 0
2603}; 2606};
2604 2607
2605static void addrconf_bonding_change(struct net_device *dev, unsigned long event) 2608static void addrconf_type_change(struct net_device *dev, unsigned long event)
2606{ 2609{
2607 struct inet6_dev *idev; 2610 struct inet6_dev *idev;
2608 ASSERT_RTNL(); 2611 ASSERT_RTNL();
2609 2612
2610 idev = __in6_dev_get(dev); 2613 idev = __in6_dev_get(dev);
2611 2614
2612 if (event == NETDEV_BONDING_NEWTYPE) 2615 if (event == NETDEV_POST_TYPE_CHANGE)
2613 ipv6_mc_remap(idev); 2616 ipv6_mc_remap(idev);
2614 else if (event == NETDEV_BONDING_OLDTYPE) 2617 else if (event == NETDEV_PRE_TYPE_CHANGE)
2615 ipv6_mc_unmap(idev); 2618 ipv6_mc_unmap(idev);
2616} 2619}
2617 2620
2618static int addrconf_ifdown(struct net_device *dev, int how) 2621static int addrconf_ifdown(struct net_device *dev, int how)
2619{ 2622{
2620 struct inet6_dev *idev;
2621 struct inet6_ifaddr *ifa, *keep_list, **bifa;
2622 struct net *net = dev_net(dev); 2623 struct net *net = dev_net(dev);
2623 int i; 2624 struct inet6_dev *idev;
2625 struct inet6_ifaddr *ifa;
2626 LIST_HEAD(keep_list);
2624 2627
2625 ASSERT_RTNL(); 2628 ASSERT_RTNL();
2626 2629
@@ -2631,8 +2634,9 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2631 if (idev == NULL) 2634 if (idev == NULL)
2632 return -ENODEV; 2635 return -ENODEV;
2633 2636
2634 /* Step 1: remove reference to ipv6 device from parent device. 2637 /*
2635 Do not dev_put! 2638 * Step 1: remove reference to ipv6 device from parent device.
2639 * Do not dev_put!
2636 */ 2640 */
2637 if (how) { 2641 if (how) {
2638 idev->dead = 1; 2642 idev->dead = 1;
@@ -2645,40 +2649,21 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2645 2649
2646 } 2650 }
2647 2651
2648 /* Step 2: clear hash table */
2649 for (i=0; i<IN6_ADDR_HSIZE; i++) {
2650 bifa = &inet6_addr_lst[i];
2651
2652 write_lock_bh(&addrconf_hash_lock);
2653 while ((ifa = *bifa) != NULL) {
2654 if (ifa->idev == idev &&
2655 (how || !(ifa->flags&IFA_F_PERMANENT) ||
2656 ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
2657 *bifa = ifa->lst_next;
2658 ifa->lst_next = NULL;
2659 __in6_ifa_put(ifa);
2660 continue;
2661 }
2662 bifa = &ifa->lst_next;
2663 }
2664 write_unlock_bh(&addrconf_hash_lock);
2665 }
2666
2667 write_lock_bh(&idev->lock); 2652 write_lock_bh(&idev->lock);
2668 2653
2669 /* Step 3: clear flags for stateless addrconf */ 2654 /* Step 2: clear flags for stateless addrconf */
2670 if (!how) 2655 if (!how)
2671 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY); 2656 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
2672 2657
2673 /* Step 4: clear address list */
2674#ifdef CONFIG_IPV6_PRIVACY 2658#ifdef CONFIG_IPV6_PRIVACY
2675 if (how && del_timer(&idev->regen_timer)) 2659 if (how && del_timer(&idev->regen_timer))
2676 in6_dev_put(idev); 2660 in6_dev_put(idev);
2677 2661
2678 /* clear tempaddr list */ 2662 /* Step 3: clear tempaddr list */
2679 while ((ifa = idev->tempaddr_list) != NULL) { 2663 while (!list_empty(&idev->tempaddr_list)) {
2680 idev->tempaddr_list = ifa->tmp_next; 2664 ifa = list_first_entry(&idev->tempaddr_list,
2681 ifa->tmp_next = NULL; 2665 struct inet6_ifaddr, tmp_list);
2666 list_del(&ifa->tmp_list);
2682 ifa->dead = 1; 2667 ifa->dead = 1;
2683 write_unlock_bh(&idev->lock); 2668 write_unlock_bh(&idev->lock);
2684 spin_lock_bh(&ifa->lock); 2669 spin_lock_bh(&ifa->lock);
@@ -2692,23 +2677,18 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2692 write_lock_bh(&idev->lock); 2677 write_lock_bh(&idev->lock);
2693 } 2678 }
2694#endif 2679#endif
2695 keep_list = NULL;
2696 bifa = &keep_list;
2697 while ((ifa = idev->addr_list) != NULL) {
2698 idev->addr_list = ifa->if_next;
2699 ifa->if_next = NULL;
2700 2680
2681 while (!list_empty(&idev->addr_list)) {
2682 ifa = list_first_entry(&idev->addr_list,
2683 struct inet6_ifaddr, if_list);
2701 addrconf_del_timer(ifa); 2684 addrconf_del_timer(ifa);
2702 2685
2703 /* If just doing link down, and address is permanent 2686 /* If just doing link down, and address is permanent
2704 and not link-local, then retain it. */ 2687 and not link-local, then retain it. */
2705 if (how == 0 && 2688 if (!how &&
2706 (ifa->flags&IFA_F_PERMANENT) && 2689 (ifa->flags&IFA_F_PERMANENT) &&
2707 !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) { 2690 !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
2708 2691 list_move_tail(&ifa->if_list, &keep_list);
2709 /* Move to holding list */
2710 *bifa = ifa;
2711 bifa = &ifa->if_next;
2712 2692
2713 /* If not doing DAD on this address, just keep it. */ 2693 /* If not doing DAD on this address, just keep it. */
2714 if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) || 2694 if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
@@ -2723,24 +2703,32 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2723 /* Flag it for later restoration when link comes up */ 2703 /* Flag it for later restoration when link comes up */
2724 ifa->flags |= IFA_F_TENTATIVE; 2704 ifa->flags |= IFA_F_TENTATIVE;
2725 in6_ifa_hold(ifa); 2705 in6_ifa_hold(ifa);
2706 write_unlock_bh(&idev->lock);
2726 } else { 2707 } else {
2708 list_del(&ifa->if_list);
2727 ifa->dead = 1; 2709 ifa->dead = 1;
2710 write_unlock_bh(&idev->lock);
2711
2712 /* clear hash table */
2713 spin_lock_bh(&addrconf_hash_lock);
2714 hlist_del_init_rcu(&ifa->addr_lst);
2715 spin_unlock_bh(&addrconf_hash_lock);
2728 } 2716 }
2729 write_unlock_bh(&idev->lock);
2730 2717
2731 __ipv6_ifa_notify(RTM_DELADDR, ifa); 2718 __ipv6_ifa_notify(RTM_DELADDR, ifa);
2732 atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); 2719 if (ifa->dead)
2720 atomic_notifier_call_chain(&inet6addr_chain,
2721 NETDEV_DOWN, ifa);
2733 in6_ifa_put(ifa); 2722 in6_ifa_put(ifa);
2734 2723
2735 write_lock_bh(&idev->lock); 2724 write_lock_bh(&idev->lock);
2736 } 2725 }
2737 2726
2738 idev->addr_list = keep_list; 2727 list_splice(&keep_list, &idev->addr_list);
2739 2728
2740 write_unlock_bh(&idev->lock); 2729 write_unlock_bh(&idev->lock);
2741 2730
2742 /* Step 5: Discard multicast list */ 2731 /* Step 5: Discard multicast list */
2743
2744 if (how) 2732 if (how)
2745 ipv6_mc_destroy_dev(idev); 2733 ipv6_mc_destroy_dev(idev);
2746 else 2734 else
@@ -2748,8 +2736,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
2748 2736
2749 idev->tstamp = jiffies; 2737 idev->tstamp = jiffies;
2750 2738
2751 /* Shot the device (if unregistered) */ 2739 /* Last: Shot the device (if unregistered) */
2752
2753 if (how) { 2740 if (how) {
2754 addrconf_sysctl_unregister(idev); 2741 addrconf_sysctl_unregister(idev);
2755 neigh_parms_release(&nd_tbl, idev->nd_parms); 2742 neigh_parms_release(&nd_tbl, idev->nd_parms);
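
[Reviewer note] addrconf_ifdown() no longer re-threads next pointers to retain permanent addresses; survivors are parked on a local LIST_HEAD and spliced back in one step, and entries being destroyed are unhashed with hlist_del_init_rcu() so concurrent RCU readers of the hash never see a torn chain. The retain-or-drop shape, sketched under hypothetical names:

	#include <linux/list.h>
	#include <linux/rculist.h>
	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct demo_addr {			/* hypothetical entry */
		struct list_head if_list;
		struct hlist_node addr_lst;
	};

	static bool demo_should_keep(const struct demo_addr *a);

	static void demo_prune(struct list_head *addrs, spinlock_t *hash_lock)
	{
		struct demo_addr *a;
		LIST_HEAD(keep);

		while (!list_empty(addrs)) {
			a = list_first_entry(addrs, struct demo_addr, if_list);
			if (demo_should_keep(a)) {
				/* park survivors on the private list */
				list_move_tail(&a->if_list, &keep);
			} else {
				list_del(&a->if_list);
				/* unhash under the writer lock; RCU readers
				 * already on the chain remain safe */
				spin_lock_bh(hash_lock);
				hlist_del_init_rcu(&a->addr_lst);
				spin_unlock_bh(hash_lock);
			}
		}
		/* reinstate everything retained, in original order */
		list_splice(&keep, addrs);
	}
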
@@ -2860,7 +2847,7 @@ static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags)
2860 * Optimistic nodes can start receiving 2847 * Optimistic nodes can start receiving
2861 * Frames right away 2848 * Frames right away
2862 */ 2849 */
2863 if(ifp->flags & IFA_F_OPTIMISTIC) 2850 if (ifp->flags & IFA_F_OPTIMISTIC)
2864 ip6_ins_rt(ifp->rt); 2851 ip6_ins_rt(ifp->rt);
2865 2852
2866 addrconf_dad_kick(ifp); 2853 addrconf_dad_kick(ifp);
@@ -2910,7 +2897,7 @@ out:
2910 2897
2911static void addrconf_dad_completed(struct inet6_ifaddr *ifp) 2898static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2912{ 2899{
2913 struct net_device * dev = ifp->idev->dev; 2900 struct net_device *dev = ifp->idev->dev;
2914 2901
2915 /* 2902 /*
2916 * Configure the address for reception. Now it is valid. 2903 * Configure the address for reception. Now it is valid.
@@ -2941,11 +2928,12 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
2941 } 2928 }
2942} 2929}
2943 2930
2944static void addrconf_dad_run(struct inet6_dev *idev) { 2931static void addrconf_dad_run(struct inet6_dev *idev)
2932{
2945 struct inet6_ifaddr *ifp; 2933 struct inet6_ifaddr *ifp;
2946 2934
2947 read_lock_bh(&idev->lock); 2935 read_lock_bh(&idev->lock);
2948 for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) { 2936 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2949 spin_lock(&ifp->lock); 2937 spin_lock(&ifp->lock);
2950 if (!(ifp->flags & IFA_F_TENTATIVE)) { 2938 if (!(ifp->flags & IFA_F_TENTATIVE)) {
2951 spin_unlock(&ifp->lock); 2939 spin_unlock(&ifp->lock);
@@ -2970,36 +2958,35 @@ static struct inet6_ifaddr *if6_get_first(struct seq_file *seq)
2970 struct net *net = seq_file_net(seq); 2958 struct net *net = seq_file_net(seq);
2971 2959
2972 for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) { 2960 for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
2973 ifa = inet6_addr_lst[state->bucket]; 2961 struct hlist_node *n;
2974 2962 hlist_for_each_entry_rcu(ifa, n, &inet6_addr_lst[state->bucket],
2975 while (ifa && !net_eq(dev_net(ifa->idev->dev), net)) 2963 addr_lst)
2976 ifa = ifa->lst_next; 2964 if (net_eq(dev_net(ifa->idev->dev), net))
2977 if (ifa) 2965 return ifa;
2978 break;
2979 } 2966 }
2980 return ifa; 2967 return NULL;
2981} 2968}
2982 2969
2983static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa) 2970static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
2971 struct inet6_ifaddr *ifa)
2984{ 2972{
2985 struct if6_iter_state *state = seq->private; 2973 struct if6_iter_state *state = seq->private;
2986 struct net *net = seq_file_net(seq); 2974 struct net *net = seq_file_net(seq);
2975 struct hlist_node *n = &ifa->addr_lst;
2987 2976
2988 ifa = ifa->lst_next; 2977 hlist_for_each_entry_continue_rcu(ifa, n, addr_lst)
2989try_again: 2978 if (net_eq(dev_net(ifa->idev->dev), net))
2990 if (ifa) { 2979 return ifa;
2991 if (!net_eq(dev_net(ifa->idev->dev), net)) {
2992 ifa = ifa->lst_next;
2993 goto try_again;
2994 }
2995 }
2996 2980
2997 if (!ifa && ++state->bucket < IN6_ADDR_HSIZE) { 2981 while (++state->bucket < IN6_ADDR_HSIZE) {
2998 ifa = inet6_addr_lst[state->bucket]; 2982 hlist_for_each_entry(ifa, n,
2999 goto try_again; 2983 &inet6_addr_lst[state->bucket], addr_lst) {
2984 if (net_eq(dev_net(ifa->idev->dev), net))
2985 return ifa;
2986 }
3000 } 2987 }
3001 2988
3002 return ifa; 2989 return NULL;
3003} 2990}
3004 2991
3005static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos) 2992static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
@@ -3007,15 +2994,15 @@ static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
3007 struct inet6_ifaddr *ifa = if6_get_first(seq); 2994 struct inet6_ifaddr *ifa = if6_get_first(seq);
3008 2995
3009 if (ifa) 2996 if (ifa)
3010 while(pos && (ifa = if6_get_next(seq, ifa)) != NULL) 2997 while (pos && (ifa = if6_get_next(seq, ifa)) != NULL)
3011 --pos; 2998 --pos;
3012 return pos ? NULL : ifa; 2999 return pos ? NULL : ifa;
3013} 3000}
3014 3001
3015static void *if6_seq_start(struct seq_file *seq, loff_t *pos) 3002static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
3016 __acquires(addrconf_hash_lock) 3003 __acquires(rcu)
3017{ 3004{
3018 read_lock_bh(&addrconf_hash_lock); 3005 rcu_read_lock_bh();
3019 return if6_get_idx(seq, *pos); 3006 return if6_get_idx(seq, *pos);
3020} 3007}
3021 3008
@@ -3029,9 +3016,9 @@ static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3029} 3016}
3030 3017
3031static void if6_seq_stop(struct seq_file *seq, void *v) 3018static void if6_seq_stop(struct seq_file *seq, void *v)
3032 __releases(addrconf_hash_lock) 3019 __releases(rcu)
3033{ 3020{
3034 read_unlock_bh(&addrconf_hash_lock); 3021 rcu_read_unlock_bh();
3035} 3022}
3036 3023
3037static int if6_seq_show(struct seq_file *seq, void *v) 3024static int if6_seq_show(struct seq_file *seq, void *v)
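
[Reviewer note] Note the struct hlist_node *n cursor threaded through these iterations: in this kernel generation hlist_for_each_entry_rcu() (and its _continue variant) takes the node cursor as an explicit argument; later kernels dropped it. A minimal reader under this era's API, assuming a hypothetical entry type:

	#include <linux/rculist.h>

	struct demo_addr {			/* hypothetical entry */
		struct hlist_node addr_lst;
		int id;
	};

	static struct demo_addr *demo_lookup(struct hlist_head *head, int id)
	{
		struct demo_addr *a;
		struct hlist_node *n;		/* explicit cursor in this era */

		/* caller holds rcu_read_lock_bh(), matching if6_seq_start() */
		hlist_for_each_entry_rcu(a, n, head, addr_lst)
			if (a->id == id)
				return a;
		return NULL;
	}
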
@@ -3101,10 +3088,12 @@ void if6_proc_exit(void)
3101int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr) 3088int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
3102{ 3089{
3103 int ret = 0; 3090 int ret = 0;
3104 struct inet6_ifaddr * ifp; 3091 struct inet6_ifaddr *ifp = NULL;
3105 u8 hash = ipv6_addr_hash(addr); 3092 struct hlist_node *n;
3106 read_lock_bh(&addrconf_hash_lock); 3093 unsigned int hash = ipv6_addr_hash(addr);
3107 for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) { 3094
3095 rcu_read_lock_bh();
3096 hlist_for_each_entry_rcu(ifp, n, &inet6_addr_lst[hash], addr_lst) {
3108 if (!net_eq(dev_net(ifp->idev->dev), net)) 3097 if (!net_eq(dev_net(ifp->idev->dev), net))
3109 continue; 3098 continue;
3110 if (ipv6_addr_equal(&ifp->addr, addr) && 3099 if (ipv6_addr_equal(&ifp->addr, addr) &&
@@ -3113,7 +3102,7 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
3113 break; 3102 break;
3114 } 3103 }
3115 } 3104 }
3116 read_unlock_bh(&addrconf_hash_lock); 3105 rcu_read_unlock_bh();
3117 return ret; 3106 return ret;
3118} 3107}
3119#endif 3108#endif
@@ -3124,43 +3113,35 @@ int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
3124 3113
3125static void addrconf_verify(unsigned long foo) 3114static void addrconf_verify(unsigned long foo)
3126{ 3115{
3116 unsigned long now, next, next_sec, next_sched;
3127 struct inet6_ifaddr *ifp; 3117 struct inet6_ifaddr *ifp;
3128 unsigned long now, next; 3118 struct hlist_node *node;
3129 int i; 3119 int i;
3130 3120
3131 spin_lock_bh(&addrconf_verify_lock); 3121 rcu_read_lock_bh();
3122 spin_lock(&addrconf_verify_lock);
3132 now = jiffies; 3123 now = jiffies;
3133 next = now + ADDR_CHECK_FREQUENCY; 3124 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
3134 3125
3135 del_timer(&addr_chk_timer); 3126 del_timer(&addr_chk_timer);
3136 3127
3137 for (i=0; i < IN6_ADDR_HSIZE; i++) { 3128 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3138
3139restart: 3129restart:
3140 read_lock(&addrconf_hash_lock); 3130 hlist_for_each_entry_rcu(ifp, node,
3141 for (ifp=inet6_addr_lst[i]; ifp; ifp=ifp->lst_next) { 3131 &inet6_addr_lst[i], addr_lst) {
3142 unsigned long age; 3132 unsigned long age;
3143#ifdef CONFIG_IPV6_PRIVACY
3144 unsigned long regen_advance;
3145#endif
3146 3133
3147 if (ifp->flags & IFA_F_PERMANENT) 3134 if (ifp->flags & IFA_F_PERMANENT)
3148 continue; 3135 continue;
3149 3136
3150 spin_lock(&ifp->lock); 3137 spin_lock(&ifp->lock);
3151 age = (now - ifp->tstamp) / HZ; 3138 /* We try to batch several events at once. */
3152 3139 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
3153#ifdef CONFIG_IPV6_PRIVACY
3154 regen_advance = ifp->idev->cnf.regen_max_retry *
3155 ifp->idev->cnf.dad_transmits *
3156 ifp->idev->nd_parms->retrans_time / HZ;
3157#endif
3158 3140
3159 if (ifp->valid_lft != INFINITY_LIFE_TIME && 3141 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
3160 age >= ifp->valid_lft) { 3142 age >= ifp->valid_lft) {
3161 spin_unlock(&ifp->lock); 3143 spin_unlock(&ifp->lock);
3162 in6_ifa_hold(ifp); 3144 in6_ifa_hold(ifp);
3163 read_unlock(&addrconf_hash_lock);
3164 ipv6_del_addr(ifp); 3145 ipv6_del_addr(ifp);
3165 goto restart; 3146 goto restart;
3166 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) { 3147 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
@@ -3182,7 +3163,6 @@ restart:
3182 3163
3183 if (deprecate) { 3164 if (deprecate) {
3184 in6_ifa_hold(ifp); 3165 in6_ifa_hold(ifp);
3185 read_unlock(&addrconf_hash_lock);
3186 3166
3187 ipv6_ifa_notify(0, ifp); 3167 ipv6_ifa_notify(0, ifp);
3188 in6_ifa_put(ifp); 3168 in6_ifa_put(ifp);
@@ -3191,6 +3171,10 @@ restart:
3191#ifdef CONFIG_IPV6_PRIVACY 3171#ifdef CONFIG_IPV6_PRIVACY
3192 } else if ((ifp->flags&IFA_F_TEMPORARY) && 3172 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
3193 !(ifp->flags&IFA_F_TENTATIVE)) { 3173 !(ifp->flags&IFA_F_TENTATIVE)) {
3174 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
3175 ifp->idev->cnf.dad_transmits *
3176 ifp->idev->nd_parms->retrans_time / HZ;
3177
3194 if (age >= ifp->prefered_lft - regen_advance) { 3178 if (age >= ifp->prefered_lft - regen_advance) {
3195 struct inet6_ifaddr *ifpub = ifp->ifpub; 3179 struct inet6_ifaddr *ifpub = ifp->ifpub;
3196 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next)) 3180 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
@@ -3200,7 +3184,7 @@ restart:
3200 in6_ifa_hold(ifp); 3184 in6_ifa_hold(ifp);
3201 in6_ifa_hold(ifpub); 3185 in6_ifa_hold(ifpub);
3202 spin_unlock(&ifp->lock); 3186 spin_unlock(&ifp->lock);
3203 read_unlock(&addrconf_hash_lock); 3187
3204 spin_lock(&ifpub->lock); 3188 spin_lock(&ifpub->lock);
3205 ifpub->regen_count = 0; 3189 ifpub->regen_count = 0;
3206 spin_unlock(&ifpub->lock); 3190 spin_unlock(&ifpub->lock);
@@ -3220,12 +3204,26 @@ restart:
3220 spin_unlock(&ifp->lock); 3204 spin_unlock(&ifp->lock);
3221 } 3205 }
3222 } 3206 }
3223 read_unlock(&addrconf_hash_lock);
3224 } 3207 }
3225 3208
3226 addr_chk_timer.expires = time_before(next, jiffies + HZ) ? jiffies + HZ : next; 3209 next_sec = round_jiffies_up(next);
3210 next_sched = next;
3211
3212 /* If rounded timeout is accurate enough, accept it. */
3213 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
3214 next_sched = next_sec;
3215
3216 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
3217 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
3218 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
3219
3220 ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
3221 now, next, next_sec, next_sched));
3222
3223 addr_chk_timer.expires = next_sched;
3227 add_timer(&addr_chk_timer); 3224 add_timer(&addr_chk_timer);
3228 spin_unlock_bh(&addrconf_verify_lock); 3225 spin_unlock(&addrconf_verify_lock);
3226 rcu_read_unlock_bh();
3229} 3227}
3230 3228
3231static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local) 3229static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local)
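
[Reviewer note] The re-arm logic above is a power-saving pattern worth calling out: prefer the second-aligned expiry from round_jiffies_up() when it lands within an acceptable fuzz window, but never re-arm sooner than a minimum interval. A compact restatement, with illustrative constants rather than the patch's ADDRCONF_TIMER_FUZZ values:

	#include <linux/jiffies.h>
	#include <linux/timer.h>

	/* Illustrative windows; the patch defines its own ADDRCONF_TIMER_FUZZ*. */
	#define DEMO_FUZZ	(HZ / 4)	/* acceptable rounding slack */
	#define DEMO_FUZZ_MAX	HZ		/* minimum re-arm distance */

	static void demo_rearm(struct timer_list *t, unsigned long next)
	{
		unsigned long next_sec = round_jiffies_up(next);
		unsigned long next_sched = next;

		/* Accept the second-aligned expiry when it is close enough,
		 * so many timers fire together and wakeups batch up. */
		if (time_before(next_sec, next + DEMO_FUZZ))
			next_sched = next_sec;

		/* But never re-arm closer than the minimum interval. */
		if (time_before(next_sched, jiffies + DEMO_FUZZ_MAX))
			next_sched = jiffies + DEMO_FUZZ_MAX;

		mod_timer(t, next_sched);
	}
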
@@ -3515,8 +3513,7 @@ static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
3515 return nlmsg_end(skb, nlh); 3513 return nlmsg_end(skb, nlh);
3516} 3514}
3517 3515
3518enum addr_type_t 3516enum addr_type_t {
3519{
3520 UNICAST_ADDR, 3517 UNICAST_ADDR,
3521 MULTICAST_ADDR, 3518 MULTICAST_ADDR,
3522 ANYCAST_ADDR, 3519 ANYCAST_ADDR,
@@ -3527,7 +3524,6 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3527 struct netlink_callback *cb, enum addr_type_t type, 3524 struct netlink_callback *cb, enum addr_type_t type,
3528 int s_ip_idx, int *p_ip_idx) 3525 int s_ip_idx, int *p_ip_idx)
3529{ 3526{
3530 struct inet6_ifaddr *ifa;
3531 struct ifmcaddr6 *ifmca; 3527 struct ifmcaddr6 *ifmca;
3532 struct ifacaddr6 *ifaca; 3528 struct ifacaddr6 *ifaca;
3533 int err = 1; 3529 int err = 1;
@@ -3535,11 +3531,12 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3535 3531
3536 read_lock_bh(&idev->lock); 3532 read_lock_bh(&idev->lock);
3537 switch (type) { 3533 switch (type) {
3538 case UNICAST_ADDR: 3534 case UNICAST_ADDR: {
3535 struct inet6_ifaddr *ifa;
3536
3539 /* unicast address incl. temp addr */ 3537 /* unicast address incl. temp addr */
3540 for (ifa = idev->addr_list; ifa; 3538 list_for_each_entry(ifa, &idev->addr_list, if_list) {
3541 ifa = ifa->if_next, ip_idx++) { 3539 if (++ip_idx < s_ip_idx)
3542 if (ip_idx < s_ip_idx)
3543 continue; 3540 continue;
3544 err = inet6_fill_ifaddr(skb, ifa, 3541 err = inet6_fill_ifaddr(skb, ifa,
3545 NETLINK_CB(cb->skb).pid, 3542 NETLINK_CB(cb->skb).pid,
@@ -3550,6 +3547,7 @@ static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
3550 break; 3547 break;
3551 } 3548 }
3552 break; 3549 break;
3550 }
3553 case MULTICAST_ADDR: 3551 case MULTICAST_ADDR:
3554 /* multicast address */ 3552 /* multicast address */
3555 for (ifmca = idev->mc_list; ifmca; 3553 for (ifmca = idev->mc_list; ifmca;
@@ -3614,7 +3612,8 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
3614 if (h > s_h || idx > s_idx) 3612 if (h > s_h || idx > s_idx)
3615 s_ip_idx = 0; 3613 s_ip_idx = 0;
3616 ip_idx = 0; 3614 ip_idx = 0;
3617 if ((idev = __in6_dev_get(dev)) == NULL) 3615 idev = __in6_dev_get(dev);
3616 if (!idev)
3618 goto cont; 3617 goto cont;
3619 3618
3620 if (in6_dump_addrs(idev, skb, cb, type, 3619 if (in6_dump_addrs(idev, skb, cb, type,
@@ -3681,12 +3680,14 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr* nlh,
3681 if (ifm->ifa_index) 3680 if (ifm->ifa_index)
3682 dev = __dev_get_by_index(net, ifm->ifa_index); 3681 dev = __dev_get_by_index(net, ifm->ifa_index);
3683 3682
3684 if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) { 3683 ifa = ipv6_get_ifaddr(net, addr, dev, 1);
3684 if (!ifa) {
3685 err = -EADDRNOTAVAIL; 3685 err = -EADDRNOTAVAIL;
3686 goto errout; 3686 goto errout;
3687 } 3687 }
3688 3688
3689 if ((skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL)) == NULL) { 3689 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
3690 if (!skb) {
3690 err = -ENOBUFS; 3691 err = -ENOBUFS;
3691 goto errout_ifa; 3692 goto errout_ifa;
3692 } 3693 }
@@ -3811,7 +3812,7 @@ static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
3811static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype, 3812static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
3812 int bytes) 3813 int bytes)
3813{ 3814{
3814 switch(attrtype) { 3815 switch (attrtype) {
3815 case IFLA_INET6_STATS: 3816 case IFLA_INET6_STATS:
3816 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes); 3817 __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
3817 break; 3818 break;
@@ -4047,7 +4048,8 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
4047 addrconf_leave_anycast(ifp); 4048 addrconf_leave_anycast(ifp);
4048 addrconf_leave_solict(ifp->idev, &ifp->addr); 4049 addrconf_leave_solict(ifp->idev, &ifp->addr);
4049 dst_hold(&ifp->rt->u.dst); 4050 dst_hold(&ifp->rt->u.dst);
4050 if (ip6_del_rt(ifp->rt)) 4051
4052 if (ifp->dead && ip6_del_rt(ifp->rt))
4051 dst_free(&ifp->rt->u.dst); 4053 dst_free(&ifp->rt->u.dst);
4052 break; 4054 break;
4053 } 4055 }
@@ -4163,211 +4165,211 @@ static struct addrconf_sysctl_table
4163 .sysctl_header = NULL, 4165 .sysctl_header = NULL,
4164 .addrconf_vars = { 4166 .addrconf_vars = {
4165 { 4167 {
4166 .procname = "forwarding", 4168 .procname = "forwarding",
4167 .data = &ipv6_devconf.forwarding, 4169 .data = &ipv6_devconf.forwarding,
4168 .maxlen = sizeof(int), 4170 .maxlen = sizeof(int),
4169 .mode = 0644, 4171 .mode = 0644,
4170 .proc_handler = addrconf_sysctl_forward, 4172 .proc_handler = addrconf_sysctl_forward,
4171 }, 4173 },
4172 { 4174 {
4173 .procname = "hop_limit", 4175 .procname = "hop_limit",
4174 .data = &ipv6_devconf.hop_limit, 4176 .data = &ipv6_devconf.hop_limit,
4175 .maxlen = sizeof(int), 4177 .maxlen = sizeof(int),
4176 .mode = 0644, 4178 .mode = 0644,
4177 .proc_handler = proc_dointvec, 4179 .proc_handler = proc_dointvec,
4178 }, 4180 },
4179 { 4181 {
4180 .procname = "mtu", 4182 .procname = "mtu",
4181 .data = &ipv6_devconf.mtu6, 4183 .data = &ipv6_devconf.mtu6,
4182 .maxlen = sizeof(int), 4184 .maxlen = sizeof(int),
4183 .mode = 0644, 4185 .mode = 0644,
4184 .proc_handler = proc_dointvec, 4186 .proc_handler = proc_dointvec,
4185 }, 4187 },
4186 { 4188 {
4187 .procname = "accept_ra", 4189 .procname = "accept_ra",
4188 .data = &ipv6_devconf.accept_ra, 4190 .data = &ipv6_devconf.accept_ra,
4189 .maxlen = sizeof(int), 4191 .maxlen = sizeof(int),
4190 .mode = 0644, 4192 .mode = 0644,
4191 .proc_handler = proc_dointvec, 4193 .proc_handler = proc_dointvec,
4192 }, 4194 },
4193 { 4195 {
4194 .procname = "accept_redirects", 4196 .procname = "accept_redirects",
4195 .data = &ipv6_devconf.accept_redirects, 4197 .data = &ipv6_devconf.accept_redirects,
4196 .maxlen = sizeof(int), 4198 .maxlen = sizeof(int),
4197 .mode = 0644, 4199 .mode = 0644,
4198 .proc_handler = proc_dointvec, 4200 .proc_handler = proc_dointvec,
4199 }, 4201 },
4200 { 4202 {
4201 .procname = "autoconf", 4203 .procname = "autoconf",
4202 .data = &ipv6_devconf.autoconf, 4204 .data = &ipv6_devconf.autoconf,
4203 .maxlen = sizeof(int), 4205 .maxlen = sizeof(int),
4204 .mode = 0644, 4206 .mode = 0644,
4205 .proc_handler = proc_dointvec, 4207 .proc_handler = proc_dointvec,
4206 }, 4208 },
4207 { 4209 {
4208 .procname = "dad_transmits", 4210 .procname = "dad_transmits",
4209 .data = &ipv6_devconf.dad_transmits, 4211 .data = &ipv6_devconf.dad_transmits,
4210 .maxlen = sizeof(int), 4212 .maxlen = sizeof(int),
4211 .mode = 0644, 4213 .mode = 0644,
4212 .proc_handler = proc_dointvec, 4214 .proc_handler = proc_dointvec,
4213 }, 4215 },
4214 { 4216 {
4215 .procname = "router_solicitations", 4217 .procname = "router_solicitations",
4216 .data = &ipv6_devconf.rtr_solicits, 4218 .data = &ipv6_devconf.rtr_solicits,
4217 .maxlen = sizeof(int), 4219 .maxlen = sizeof(int),
4218 .mode = 0644, 4220 .mode = 0644,
4219 .proc_handler = proc_dointvec, 4221 .proc_handler = proc_dointvec,
4220 }, 4222 },
4221 { 4223 {
4222 .procname = "router_solicitation_interval", 4224 .procname = "router_solicitation_interval",
4223 .data = &ipv6_devconf.rtr_solicit_interval, 4225 .data = &ipv6_devconf.rtr_solicit_interval,
4224 .maxlen = sizeof(int), 4226 .maxlen = sizeof(int),
4225 .mode = 0644, 4227 .mode = 0644,
4226 .proc_handler = proc_dointvec_jiffies, 4228 .proc_handler = proc_dointvec_jiffies,
4227 }, 4229 },
4228 { 4230 {
4229 .procname = "router_solicitation_delay", 4231 .procname = "router_solicitation_delay",
4230 .data = &ipv6_devconf.rtr_solicit_delay, 4232 .data = &ipv6_devconf.rtr_solicit_delay,
4231 .maxlen = sizeof(int), 4233 .maxlen = sizeof(int),
4232 .mode = 0644, 4234 .mode = 0644,
4233 .proc_handler = proc_dointvec_jiffies, 4235 .proc_handler = proc_dointvec_jiffies,
4234 }, 4236 },
4235 { 4237 {
4236 .procname = "force_mld_version", 4238 .procname = "force_mld_version",
4237 .data = &ipv6_devconf.force_mld_version, 4239 .data = &ipv6_devconf.force_mld_version,
4238 .maxlen = sizeof(int), 4240 .maxlen = sizeof(int),
4239 .mode = 0644, 4241 .mode = 0644,
4240 .proc_handler = proc_dointvec, 4242 .proc_handler = proc_dointvec,
4241 }, 4243 },
4242#ifdef CONFIG_IPV6_PRIVACY 4244#ifdef CONFIG_IPV6_PRIVACY
4243 { 4245 {
4244 .procname = "use_tempaddr", 4246 .procname = "use_tempaddr",
4245 .data = &ipv6_devconf.use_tempaddr, 4247 .data = &ipv6_devconf.use_tempaddr,
4246 .maxlen = sizeof(int), 4248 .maxlen = sizeof(int),
4247 .mode = 0644, 4249 .mode = 0644,
4248 .proc_handler = proc_dointvec, 4250 .proc_handler = proc_dointvec,
4249 }, 4251 },
4250 { 4252 {
4251 .procname = "temp_valid_lft", 4253 .procname = "temp_valid_lft",
4252 .data = &ipv6_devconf.temp_valid_lft, 4254 .data = &ipv6_devconf.temp_valid_lft,
4253 .maxlen = sizeof(int), 4255 .maxlen = sizeof(int),
4254 .mode = 0644, 4256 .mode = 0644,
4255 .proc_handler = proc_dointvec, 4257 .proc_handler = proc_dointvec,
4256 }, 4258 },
4257 { 4259 {
4258 .procname = "temp_prefered_lft", 4260 .procname = "temp_prefered_lft",
4259 .data = &ipv6_devconf.temp_prefered_lft, 4261 .data = &ipv6_devconf.temp_prefered_lft,
4260 .maxlen = sizeof(int), 4262 .maxlen = sizeof(int),
4261 .mode = 0644, 4263 .mode = 0644,
4262 .proc_handler = proc_dointvec, 4264 .proc_handler = proc_dointvec,
4263 }, 4265 },
4264 { 4266 {
4265 .procname = "regen_max_retry", 4267 .procname = "regen_max_retry",
4266 .data = &ipv6_devconf.regen_max_retry, 4268 .data = &ipv6_devconf.regen_max_retry,
4267 .maxlen = sizeof(int), 4269 .maxlen = sizeof(int),
4268 .mode = 0644, 4270 .mode = 0644,
4269 .proc_handler = proc_dointvec, 4271 .proc_handler = proc_dointvec,
4270 }, 4272 },
4271 { 4273 {
4272 .procname = "max_desync_factor", 4274 .procname = "max_desync_factor",
4273 .data = &ipv6_devconf.max_desync_factor, 4275 .data = &ipv6_devconf.max_desync_factor,
4274 .maxlen = sizeof(int), 4276 .maxlen = sizeof(int),
4275 .mode = 0644, 4277 .mode = 0644,
4276 .proc_handler = proc_dointvec, 4278 .proc_handler = proc_dointvec,
4277 }, 4279 },
4278#endif 4280#endif
4279 { 4281 {
4280 .procname = "max_addresses", 4282 .procname = "max_addresses",
4281 .data = &ipv6_devconf.max_addresses, 4283 .data = &ipv6_devconf.max_addresses,
4282 .maxlen = sizeof(int), 4284 .maxlen = sizeof(int),
4283 .mode = 0644, 4285 .mode = 0644,
4284 .proc_handler = proc_dointvec, 4286 .proc_handler = proc_dointvec,
4285 }, 4287 },
4286 { 4288 {
4287 .procname = "accept_ra_defrtr", 4289 .procname = "accept_ra_defrtr",
4288 .data = &ipv6_devconf.accept_ra_defrtr, 4290 .data = &ipv6_devconf.accept_ra_defrtr,
4289 .maxlen = sizeof(int), 4291 .maxlen = sizeof(int),
4290 .mode = 0644, 4292 .mode = 0644,
4291 .proc_handler = proc_dointvec, 4293 .proc_handler = proc_dointvec,
4292 }, 4294 },
4293 { 4295 {
4294 .procname = "accept_ra_pinfo", 4296 .procname = "accept_ra_pinfo",
4295 .data = &ipv6_devconf.accept_ra_pinfo, 4297 .data = &ipv6_devconf.accept_ra_pinfo,
4296 .maxlen = sizeof(int), 4298 .maxlen = sizeof(int),
4297 .mode = 0644, 4299 .mode = 0644,
4298 .proc_handler = proc_dointvec, 4300 .proc_handler = proc_dointvec,
4299 }, 4301 },
4300#ifdef CONFIG_IPV6_ROUTER_PREF 4302#ifdef CONFIG_IPV6_ROUTER_PREF
4301 { 4303 {
4302 .procname = "accept_ra_rtr_pref", 4304 .procname = "accept_ra_rtr_pref",
4303 .data = &ipv6_devconf.accept_ra_rtr_pref, 4305 .data = &ipv6_devconf.accept_ra_rtr_pref,
4304 .maxlen = sizeof(int), 4306 .maxlen = sizeof(int),
4305 .mode = 0644, 4307 .mode = 0644,
4306 .proc_handler = proc_dointvec, 4308 .proc_handler = proc_dointvec,
4307 }, 4309 },
4308 { 4310 {
4309 .procname = "router_probe_interval", 4311 .procname = "router_probe_interval",
4310 .data = &ipv6_devconf.rtr_probe_interval, 4312 .data = &ipv6_devconf.rtr_probe_interval,
4311 .maxlen = sizeof(int), 4313 .maxlen = sizeof(int),
4312 .mode = 0644, 4314 .mode = 0644,
4313 .proc_handler = proc_dointvec_jiffies, 4315 .proc_handler = proc_dointvec_jiffies,
4314 }, 4316 },
4315#ifdef CONFIG_IPV6_ROUTE_INFO 4317#ifdef CONFIG_IPV6_ROUTE_INFO
4316 { 4318 {
4317 .procname = "accept_ra_rt_info_max_plen", 4319 .procname = "accept_ra_rt_info_max_plen",
4318 .data = &ipv6_devconf.accept_ra_rt_info_max_plen, 4320 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
4319 .maxlen = sizeof(int), 4321 .maxlen = sizeof(int),
4320 .mode = 0644, 4322 .mode = 0644,
4321 .proc_handler = proc_dointvec, 4323 .proc_handler = proc_dointvec,
4322 }, 4324 },
4323#endif 4325#endif
4324#endif 4326#endif
4325 { 4327 {
4326 .procname = "proxy_ndp", 4328 .procname = "proxy_ndp",
4327 .data = &ipv6_devconf.proxy_ndp, 4329 .data = &ipv6_devconf.proxy_ndp,
4328 .maxlen = sizeof(int), 4330 .maxlen = sizeof(int),
4329 .mode = 0644, 4331 .mode = 0644,
4330 .proc_handler = proc_dointvec, 4332 .proc_handler = proc_dointvec,
4331 }, 4333 },
4332 { 4334 {
4333 .procname = "accept_source_route", 4335 .procname = "accept_source_route",
4334 .data = &ipv6_devconf.accept_source_route, 4336 .data = &ipv6_devconf.accept_source_route,
4335 .maxlen = sizeof(int), 4337 .maxlen = sizeof(int),
4336 .mode = 0644, 4338 .mode = 0644,
4337 .proc_handler = proc_dointvec, 4339 .proc_handler = proc_dointvec,
4338 }, 4340 },
4339#ifdef CONFIG_IPV6_OPTIMISTIC_DAD 4341#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
4340 { 4342 {
4341 .procname = "optimistic_dad", 4343 .procname = "optimistic_dad",
4342 .data = &ipv6_devconf.optimistic_dad, 4344 .data = &ipv6_devconf.optimistic_dad,
4343 .maxlen = sizeof(int), 4345 .maxlen = sizeof(int),
4344 .mode = 0644, 4346 .mode = 0644,
4345 .proc_handler = proc_dointvec, 4347 .proc_handler = proc_dointvec,
4346 4348
4347 }, 4349 },
4348#endif 4350#endif
4349#ifdef CONFIG_IPV6_MROUTE 4351#ifdef CONFIG_IPV6_MROUTE
4350 { 4352 {
4351 .procname = "mc_forwarding", 4353 .procname = "mc_forwarding",
4352 .data = &ipv6_devconf.mc_forwarding, 4354 .data = &ipv6_devconf.mc_forwarding,
4353 .maxlen = sizeof(int), 4355 .maxlen = sizeof(int),
4354 .mode = 0444, 4356 .mode = 0444,
4355 .proc_handler = proc_dointvec, 4357 .proc_handler = proc_dointvec,
4356 }, 4358 },
4357#endif 4359#endif
4358 { 4360 {
4359 .procname = "disable_ipv6", 4361 .procname = "disable_ipv6",
4360 .data = &ipv6_devconf.disable_ipv6, 4362 .data = &ipv6_devconf.disable_ipv6,
4361 .maxlen = sizeof(int), 4363 .maxlen = sizeof(int),
4362 .mode = 0644, 4364 .mode = 0644,
4363 .proc_handler = addrconf_sysctl_disable, 4365 .proc_handler = addrconf_sysctl_disable,
4364 }, 4366 },
4365 { 4367 {
4366 .procname = "accept_dad", 4368 .procname = "accept_dad",
4367 .data = &ipv6_devconf.accept_dad, 4369 .data = &ipv6_devconf.accept_dad,
4368 .maxlen = sizeof(int), 4370 .maxlen = sizeof(int),
4369 .mode = 0644, 4371 .mode = 0644,
4370 .proc_handler = proc_dointvec, 4372 .proc_handler = proc_dointvec,
4371 }, 4373 },
4372 { 4374 {
4373 .procname = "force_tllao", 4375 .procname = "force_tllao",
@@ -4403,8 +4405,8 @@ static int __addrconf_sysctl_register(struct net *net, char *dev_name,
4403 if (t == NULL) 4405 if (t == NULL)
4404 goto out; 4406 goto out;
4405 4407
4406 for (i=0; t->addrconf_vars[i].data; i++) { 4408 for (i = 0; t->addrconf_vars[i].data; i++) {
4407 t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf; 4409 t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf;
4408 t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */ 4410 t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */
4409 t->addrconf_vars[i].extra2 = net; 4411 t->addrconf_vars[i].extra2 = net;
4410 } 4412 }
@@ -4541,14 +4543,12 @@ int register_inet6addr_notifier(struct notifier_block *nb)
4541{ 4543{
4542 return atomic_notifier_chain_register(&inet6addr_chain, nb); 4544 return atomic_notifier_chain_register(&inet6addr_chain, nb);
4543} 4545}
4544
4545EXPORT_SYMBOL(register_inet6addr_notifier); 4546EXPORT_SYMBOL(register_inet6addr_notifier);
4546 4547
4547int unregister_inet6addr_notifier(struct notifier_block *nb) 4548int unregister_inet6addr_notifier(struct notifier_block *nb)
4548{ 4549{
4549 return atomic_notifier_chain_unregister(&inet6addr_chain,nb); 4550 return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
4550} 4551}
4551
4552EXPORT_SYMBOL(unregister_inet6addr_notifier); 4552EXPORT_SYMBOL(unregister_inet6addr_notifier);
4553 4553
4554/* 4554/*
@@ -4557,11 +4557,12 @@ EXPORT_SYMBOL(unregister_inet6addr_notifier);
4557 4557
4558int __init addrconf_init(void) 4558int __init addrconf_init(void)
4559{ 4559{
4560 int err; 4560 int i, err;
4561 4561
4562 if ((err = ipv6_addr_label_init()) < 0) { 4562 err = ipv6_addr_label_init();
4563 printk(KERN_CRIT "IPv6 Addrconf: cannot initialize default policy table: %d.\n", 4563 if (err < 0) {
4564 err); 4564 printk(KERN_CRIT "IPv6 Addrconf:"
4565 " cannot initialize default policy table: %d.\n", err);
4565 return err; 4566 return err;
4566 } 4567 }
4567 4568
@@ -4592,6 +4593,9 @@ int __init addrconf_init(void)
4592 if (err) 4593 if (err)
4593 goto errlo; 4594 goto errlo;
4594 4595
4596 for (i = 0; i < IN6_ADDR_HSIZE; i++)
4597 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
4598
4595 register_netdevice_notifier(&ipv6_dev_notf); 4599 register_netdevice_notifier(&ipv6_dev_notf);
4596 4600
4597 addrconf_verify(0); 4601 addrconf_verify(0);
@@ -4620,7 +4624,6 @@ errlo:
4620 4624
4621void addrconf_cleanup(void) 4625void addrconf_cleanup(void)
4622{ 4626{
4623 struct inet6_ifaddr *ifa;
4624 struct net_device *dev; 4627 struct net_device *dev;
4625 int i; 4628 int i;
4626 4629
@@ -4640,20 +4643,10 @@ void addrconf_cleanup(void)
4640 /* 4643 /*
4641 * Check hash table. 4644 * Check hash table.
4642 */ 4645 */
4643 write_lock_bh(&addrconf_hash_lock); 4646 spin_lock_bh(&addrconf_hash_lock);
4644 for (i=0; i < IN6_ADDR_HSIZE; i++) { 4647 for (i = 0; i < IN6_ADDR_HSIZE; i++)
4645 for (ifa=inet6_addr_lst[i]; ifa; ) { 4648 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
4646 struct inet6_ifaddr *bifa; 4649 spin_unlock_bh(&addrconf_hash_lock);
4647
4648 bifa = ifa;
4649 ifa = ifa->lst_next;
4650 printk(KERN_DEBUG "bug: IPv6 address leakage detected: ifa=%p\n", bifa);
4651 /* Do not free it; something is wrong.
4652 Now we can investigate it with debugger.
4653 */
4654 }
4655 }
4656 write_unlock_bh(&addrconf_hash_lock);
4657 4650
4658 del_timer(&addr_chk_timer); 4651 del_timer(&addr_chk_timer);
4659 rtnl_unlock(); 4652 rtnl_unlock();
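
[Reviewer note] Cleanup no longer walks the hash printing suspected leaks for debugger inspection; it asserts that every chain is already empty, which is cheaper and turns a leak into a loud one-line warning. The shutdown check, sketched with hypothetical names:

	#include <linux/bug.h>
	#include <linux/rculist.h>
	#include <linux/spinlock.h>

	static void demo_table_check_empty(struct hlist_head *tbl, int size,
					   spinlock_t *hash_lock)
	{
		int i;

		spin_lock_bh(hash_lock);
		for (i = 0; i < size; i++)
			WARN_ON(!hlist_empty(&tbl[i]));	/* any survivor is a leak */
		spin_unlock_bh(hash_lock);
	}
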
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index 5e463c43fcc2..8124f16f2ac2 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -208,7 +208,6 @@ static int fib6_rule_fill(struct fib_rule *rule, struct sk_buff *skb,
208{ 208{
209 struct fib6_rule *rule6 = (struct fib6_rule *) rule; 209 struct fib6_rule *rule6 = (struct fib6_rule *) rule;
210 210
211 frh->family = AF_INET6;
212 frh->dst_len = rule6->dst.plen; 211 frh->dst_len = rule6->dst.plen;
213 frh->src_len = rule6->src.plen; 212 frh->src_len = rule6->src.plen;
214 frh->tos = rule6->tclass; 213 frh->tos = rule6->tclass;
@@ -239,7 +238,7 @@ static size_t fib6_rule_nlmsg_payload(struct fib_rule *rule)
239} 238}
240 239
241static struct fib_rules_ops fib6_rules_ops_template = { 240static struct fib_rules_ops fib6_rules_ops_template = {
242 .family = AF_INET6, 241 .family = FIB_RULES_IPV6,
243 .rule_size = sizeof(struct fib6_rule), 242 .rule_size = sizeof(struct fib6_rule),
244 .addr_size = sizeof(struct in6_addr), 243 .addr_size = sizeof(struct in6_addr),
245 .action = fib6_rule_action, 244 .action = fib6_rule_action,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 3330a4bd6157..12d2fa42657d 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -483,6 +483,7 @@ route_done:
483 np->tclass, NULL, &fl, (struct rt6_info*)dst, 483 np->tclass, NULL, &fl, (struct rt6_info*)dst,
484 MSG_DONTWAIT); 484 MSG_DONTWAIT);
485 if (err) { 485 if (err) {
486 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
486 ip6_flush_pending_frames(sk); 487 ip6_flush_pending_frames(sk);
487 goto out_put; 488 goto out_put;
488 } 489 }
@@ -563,6 +564,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
563 (struct rt6_info*)dst, MSG_DONTWAIT); 564 (struct rt6_info*)dst, MSG_DONTWAIT);
564 565
565 if (err) { 566 if (err) {
567 ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
566 ip6_flush_pending_frames(sk); 568 ip6_flush_pending_frames(sk);
567 goto out_put; 569 goto out_put;
568 } 570 }
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 628db24bcf22..0c5e3c3b7fd5 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -178,7 +178,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
178 return dst; 178 return dst;
179} 179}
180 180
181int inet6_csk_xmit(struct sk_buff *skb, int ipfragok) 181int inet6_csk_xmit(struct sk_buff *skb)
182{ 182{
183 struct sock *sk = skb->sk; 183 struct sock *sk = skb->sk;
184 struct inet_sock *inet = inet_sk(sk); 184 struct inet_sock *inet = inet_sk(sk);
@@ -234,7 +234,7 @@ int inet6_csk_xmit(struct sk_buff *skb, int ipfragok)
234 /* Restore final destination back after routing done */ 234 /* Restore final destination back after routing done */
235 ipv6_addr_copy(&fl.fl6_dst, &np->daddr); 235 ipv6_addr_copy(&fl.fl6_dst, &np->daddr);
236 236
237 return ip6_xmit(sk, skb, &fl, np->opt, 0); 237 return ip6_xmit(sk, skb, &fl, np->opt);
238} 238}
239 239
240EXPORT_SYMBOL_GPL(inet6_csk_xmit); 240EXPORT_SYMBOL_GPL(inet6_csk_xmit);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 6b82e02158c6..92a122b7795d 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -128,12 +128,24 @@ static __inline__ u32 fib6_new_sernum(void)
128/* 128/*
129 * test bit 129 * test bit
130 */ 130 */
131#if defined(__LITTLE_ENDIAN)
132# define BITOP_BE32_SWIZZLE (0x1F & ~7)
133#else
134# define BITOP_BE32_SWIZZLE 0
135#endif
131 136
132static __inline__ __be32 addr_bit_set(void *token, int fn_bit) 137static __inline__ __be32 addr_bit_set(void *token, int fn_bit)
133{ 138{
134 __be32 *addr = token; 139 __be32 *addr = token;
135 140 /*
136 return htonl(1 << ((~fn_bit)&0x1F)) & addr[fn_bit>>5]; 141 * Here,
142 * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
143 * is optimized version of
144 * htonl(1 << ((~fn_bit)&0x1F))
145 * See include/asm-generic/bitops/le.h.
146 */
147 return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) &
148 addr[fn_bit >> 5];
137} 149}
138 150
139static __inline__ struct fib6_node * node_alloc(void) 151static __inline__ struct fib6_node * node_alloc(void)
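
[Reviewer note] The equivalence the new comment asserts can be checked directly: on a little-endian host, byte-swapping 1 << b moves bit b = 8k + r (byte k, offset r) to byte 3 - k, i.e. to position b ^ 24, and 24 is exactly 0x1F & ~7. A throwaway userspace check of that identity (little-endian hosts only; prints nothing but "done" when it holds):

	#include <stdint.h>
	#include <stdio.h>
	#include <arpa/inet.h>			/* htonl() */

	int main(void)
	{
		unsigned int b;

		for (b = 0; b < 32; b++)
			if ((uint32_t)(1u << (b ^ 24)) != htonl(1u << b))
				printf("mismatch at bit %u\n", b);
		printf("done\n");
		return 0;
	}
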
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 65f9c379df38..263d4cf5a8de 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -181,11 +181,11 @@ int ip6_output(struct sk_buff *skb)
181} 181}
182 182
183/* 183/*
184 * xmit an sk_buff (used by TCP) 184 * xmit an sk_buff (used by TCP, SCTP and DCCP)
185 */ 185 */
186 186
187int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, 187int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
188 struct ipv6_txoptions *opt, int ipfragok) 188 struct ipv6_txoptions *opt)
189{ 189{
190 struct net *net = sock_net(sk); 190 struct net *net = sock_net(sk);
191 struct ipv6_pinfo *np = inet6_sk(sk); 191 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -231,10 +231,6 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
231 skb_reset_network_header(skb); 231 skb_reset_network_header(skb);
232 hdr = ipv6_hdr(skb); 232 hdr = ipv6_hdr(skb);
233 233
234 /* Allow local fragmentation. */
235 if (ipfragok)
236 skb->local_df = 1;
237
238 /* 234 /*
239 * Fill in the IPv6 header 235 * Fill in the IPv6 header
240 */ 236 */
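
[Reviewer note] With the ipfragok argument gone, local fragmentation is requested by the sender setting skb->local_df before handing the packet down (SCTP, apparently the lone user of the flag, now does this itself), and every ip6_xmit()/inet6_csk_xmit() caller sheds the trailing argument mechanically:

	/* before */
	err = ip6_xmit(sk, skb, &fl, np->opt, 0);

	/* after: fragmentation policy travels with the skb */
	err = ip6_xmit(sk, skb, &fl, np->opt);
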
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 33f60fca7aa7..1160400e9dbd 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -114,9 +114,9 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
114 } 114 }
115 opt = xchg(&inet6_sk(sk)->opt, opt); 115 opt = xchg(&inet6_sk(sk)->opt, opt);
116 } else { 116 } else {
117 write_lock(&sk->sk_dst_lock); 117 spin_lock(&sk->sk_dst_lock);
118 opt = xchg(&inet6_sk(sk)->opt, opt); 118 opt = xchg(&inet6_sk(sk)->opt, opt);
119 write_unlock(&sk->sk_dst_lock); 119 spin_unlock(&sk->sk_dst_lock);
120 } 120 }
121 sk_dst_reset(sk); 121 sk_dst_reset(sk);
122 122
@@ -971,14 +971,13 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
971 case IPV6_MTU: 971 case IPV6_MTU:
972 { 972 {
973 struct dst_entry *dst; 973 struct dst_entry *dst;
974
974 val = 0; 975 val = 0;
975 lock_sock(sk); 976 rcu_read_lock();
976 dst = sk_dst_get(sk); 977 dst = __sk_dst_get(sk);
977 if (dst) { 978 if (dst)
978 val = dst_mtu(dst); 979 val = dst_mtu(dst);
979 dst_release(dst); 980 rcu_read_unlock();
980 }
981 release_sock(sk);
982 if (!val) 981 if (!val)
983 return -ENOTCONN; 982 return -ENOTCONN;
984 break; 983 break;
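
[Reviewer note] The sockopt path trades lock_sock()/sk_dst_get()/dst_release() for a pure RCU read: __sk_dst_get() returns the cached route without taking a reference, valid only inside the read-side section. The resulting access pattern, as a sketch:

	#include <linux/rcupdate.h>
	#include <net/dst.h>
	#include <net/sock.h>

	static unsigned int demo_path_mtu(struct sock *sk)
	{
		struct dst_entry *dst;
		unsigned int mtu = 0;

		/* __sk_dst_get() takes no reference, so the route is
		 * valid only inside the RCU section and needs neither
		 * dst_release() nor the socket lock. */
		rcu_read_lock();
		dst = __sk_dst_get(sk);
		if (dst)
			mtu = dst_mtu(dst);
		rcu_read_unlock();

		return mtu;
	}
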
@@ -1066,12 +1065,14 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
1066 else 1065 else
1067 val = np->mcast_hops; 1066 val = np->mcast_hops;
1068 1067
1069 dst = sk_dst_get(sk); 1068 if (val < 0) {
1070 if (dst) { 1069 rcu_read_lock();
1071 if (val < 0) 1070 dst = __sk_dst_get(sk);
1071 if (dst)
1072 val = ip6_dst_hoplimit(dst); 1072 val = ip6_dst_hoplimit(dst);
1073 dst_release(dst); 1073 rcu_read_unlock();
1074 } 1074 }
1075
1075 if (val < 0) 1076 if (val < 0)
1076 val = sock_net(sk)->ipv6.devconf_all->hop_limit; 1077 val = sock_net(sk)->ipv6.devconf_all->hop_limit;
1077 break; 1078 break;
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index c483ab9fd67b..62ed08213d91 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -715,7 +715,7 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
715 if (!(mc->mca_flags&MAF_LOADED)) { 715 if (!(mc->mca_flags&MAF_LOADED)) {
716 mc->mca_flags |= MAF_LOADED; 716 mc->mca_flags |= MAF_LOADED;
717 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) 717 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
718 dev_mc_add(dev, buf, dev->addr_len, 0); 718 dev_mc_add(dev, buf);
719 } 719 }
720 spin_unlock_bh(&mc->mca_lock); 720 spin_unlock_bh(&mc->mca_lock);
721 721
@@ -741,7 +741,7 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
741 if (mc->mca_flags&MAF_LOADED) { 741 if (mc->mca_flags&MAF_LOADED) {
742 mc->mca_flags &= ~MAF_LOADED; 742 mc->mca_flags &= ~MAF_LOADED;
743 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0) 743 if (ndisc_mc_map(&mc->mca_addr, buf, dev, 0) == 0)
744 dev_mc_delete(dev, buf, dev->addr_len, 0); 744 dev_mc_del(dev, buf);
745 } 745 }
746 746
747 if (mc->mca_flags & MAF_NOREPORT) 747 if (mc->mca_flags & MAF_NOREPORT)
diff --git a/net/ipv6/netfilter/ip6t_hbh.c b/net/ipv6/netfilter/ip6t_hbh.c
index cbe8dec9744b..e60677519e40 100644
--- a/net/ipv6/netfilter/ip6t_hbh.c
+++ b/net/ipv6/netfilter/ip6t_hbh.c
@@ -141,11 +141,11 @@ hbh_mt6(const struct sk_buff *skb, const struct xt_match_param *par)
141 } 141 }
142 142
143 /* Step to the next */ 143 /* Step to the next */
144 pr_debug("len%04X \n", optlen); 144 pr_debug("len%04X\n", optlen);
145 145
146 if ((ptr > skb->len - optlen || hdrlen < optlen) && 146 if ((ptr > skb->len - optlen || hdrlen < optlen) &&
147 temp < optinfo->optsnr - 1) { 147 temp < optinfo->optsnr - 1) {
148 pr_debug("new pointer is too large! \n"); 148 pr_debug("new pointer is too large!\n");
149 break; 149 break;
150 } 150 }
151 ptr += optlen; 151 ptr += optlen;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index 58344c0fbd13..458eabfbe130 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -97,6 +97,7 @@ static const struct snmp_mib snmp6_icmp6_list[] = {
97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS), 97 SNMP_MIB_ITEM("Icmp6InMsgs", ICMP6_MIB_INMSGS),
98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS), 98 SNMP_MIB_ITEM("Icmp6InErrors", ICMP6_MIB_INERRORS),
99 SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS), 99 SNMP_MIB_ITEM("Icmp6OutMsgs", ICMP6_MIB_OUTMSGS),
100 SNMP_MIB_ITEM("Icmp6OutErrors", ICMP6_MIB_OUTERRORS),
100 SNMP_MIB_SENTINEL 101 SNMP_MIB_SENTINEL
101}; 102};
102 103
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 075f540ec197..78480f410a9b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -75,6 +75,9 @@ static void tcp_v6_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
75 struct request_sock *req); 75 struct request_sock *req);
76 76
77static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb); 77static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
78static void __tcp_v6_send_check(struct sk_buff *skb,
79 struct in6_addr *saddr,
80 struct in6_addr *daddr);
78 81
79static const struct inet_connection_sock_af_ops ipv6_mapped; 82static const struct inet_connection_sock_af_ops ipv6_mapped;
80static const struct inet_connection_sock_af_ops ipv6_specific; 83static const struct inet_connection_sock_af_ops ipv6_specific;
@@ -503,14 +506,10 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
503 506
504 skb = tcp_make_synack(sk, dst, req, rvp); 507 skb = tcp_make_synack(sk, dst, req, rvp);
505 if (skb) { 508 if (skb) {
506 struct tcphdr *th = tcp_hdr(skb); 509 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
507
508 th->check = tcp_v6_check(skb->len,
509 &treq->loc_addr, &treq->rmt_addr,
510 csum_partial(th, skb->len, skb->csum));
511 510
512 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr); 511 ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
513 err = ip6_xmit(sk, skb, &fl, opt, 0); 512 err = ip6_xmit(sk, skb, &fl, opt);
514 err = net_xmit_eval(err); 513 err = net_xmit_eval(err);
515 } 514 }
516 515
@@ -918,22 +917,29 @@ static struct timewait_sock_ops tcp6_timewait_sock_ops = {
918 .twsk_destructor= tcp_twsk_destructor, 917 .twsk_destructor= tcp_twsk_destructor,
919}; 918};
920 919
921static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) 920static void __tcp_v6_send_check(struct sk_buff *skb,
921 struct in6_addr *saddr, struct in6_addr *daddr)
922{ 922{
923 struct ipv6_pinfo *np = inet6_sk(sk);
924 struct tcphdr *th = tcp_hdr(skb); 923 struct tcphdr *th = tcp_hdr(skb);
925 924
926 if (skb->ip_summed == CHECKSUM_PARTIAL) { 925 if (skb->ip_summed == CHECKSUM_PARTIAL) {
927 th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0); 926 th->check = ~tcp_v6_check(skb->len, saddr, daddr, 0);
928 skb->csum_start = skb_transport_header(skb) - skb->head; 927 skb->csum_start = skb_transport_header(skb) - skb->head;
929 skb->csum_offset = offsetof(struct tcphdr, check); 928 skb->csum_offset = offsetof(struct tcphdr, check);
930 } else { 929 } else {
931 th->check = csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 930 th->check = tcp_v6_check(skb->len, saddr, daddr,
932 csum_partial(th, th->doff<<2, 931 csum_partial(th, th->doff << 2,
933 skb->csum)); 932 skb->csum));
934 } 933 }
935} 934}
936 935
936static void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
937{
938 struct ipv6_pinfo *np = inet6_sk(sk);
939
940 __tcp_v6_send_check(skb, &np->saddr, &np->daddr);
941}
942
937static int tcp_v6_gso_send_check(struct sk_buff *skb) 943static int tcp_v6_gso_send_check(struct sk_buff *skb)
938{ 944{
939 struct ipv6hdr *ipv6h; 945 struct ipv6hdr *ipv6h;
@@ -946,11 +952,8 @@ static int tcp_v6_gso_send_check(struct sk_buff *skb)
946 th = tcp_hdr(skb); 952 th = tcp_hdr(skb);
947 953
948 th->check = 0; 954 th->check = 0;
949 th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len,
950 IPPROTO_TCP, 0);
951 skb->csum_start = skb_transport_header(skb) - skb->head;
952 skb->csum_offset = offsetof(struct tcphdr, check);
953 skb->ip_summed = CHECKSUM_PARTIAL; 955 skb->ip_summed = CHECKSUM_PARTIAL;
956 __tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
954 return 0; 957 return 0;
955} 958}
956 959
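
[Reviewer note] Folding the three copies into __tcp_v6_send_check() also makes the offload contract easy to state: under CHECKSUM_PARTIAL the stack stores only the inverted pseudo-header sum and tells the hardware where the real checksum goes, via csum_start/csum_offset. A generalized sketch of that shape for any L4 header (helper and inputs hypothetical):

	#include <linux/skbuff.h>
	#include <net/checksum.h>

	/* 'check' points at the L4 checksum field; 'pseudo' is the
	 * pseudo-header sum computed by the caller. */
	static void demo_offload_csum(struct sk_buff *skb, __sum16 *check,
				      __wsum pseudo)
	{
		*check = ~csum_fold(pseudo);	/* seed for the NIC */
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = (unsigned char *)check -
				   skb_transport_header(skb);
		skb->ip_summed = CHECKSUM_PARTIAL;
	}
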
@@ -1053,9 +1056,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1053 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr); 1056 ipv6_addr_copy(&fl.fl6_dst, &ipv6_hdr(skb)->saddr);
1054 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr); 1057 ipv6_addr_copy(&fl.fl6_src, &ipv6_hdr(skb)->daddr);
1055 1058
1056 t1->check = csum_ipv6_magic(&fl.fl6_src, &fl.fl6_dst, 1059 __tcp_v6_send_check(buff, &fl.fl6_src, &fl.fl6_dst);
1057 tot_len, IPPROTO_TCP,
1058 buff->csum);
1059 1060
1060 fl.proto = IPPROTO_TCP; 1061 fl.proto = IPPROTO_TCP;
1061 fl.oif = inet6_iif(skb); 1062 fl.oif = inet6_iif(skb);
@@ -1070,7 +1071,7 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
1070 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) { 1071 if (!ip6_dst_lookup(ctl_sk, &dst, &fl)) {
1071 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) { 1072 if (xfrm_lookup(net, &dst, &fl, NULL, 0) >= 0) {
1072 skb_dst_set(buff, dst); 1073 skb_dst_set(buff, dst);
1073 ip6_xmit(ctl_sk, buff, &fl, NULL, 0); 1074 ip6_xmit(ctl_sk, buff, &fl, NULL);
1074 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS); 1075 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
1075 if (rst) 1076 if (rst)
1076 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS); 1077 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
@@ -1233,12 +1234,12 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1233 goto drop_and_free; 1234 goto drop_and_free;
1234 1235
1235 /* Secret recipe starts with IP addresses */ 1236 /* Secret recipe starts with IP addresses */
1236 d = &ipv6_hdr(skb)->daddr.s6_addr32[0]; 1237 d = (__force u32 *)&ipv6_hdr(skb)->daddr.s6_addr32[0];
1237 *mess++ ^= *d++; 1238 *mess++ ^= *d++;
1238 *mess++ ^= *d++; 1239 *mess++ ^= *d++;
1239 *mess++ ^= *d++; 1240 *mess++ ^= *d++;
1240 *mess++ ^= *d++; 1241 *mess++ ^= *d++;
1241 d = &ipv6_hdr(skb)->saddr.s6_addr32[0]; 1242 d = (__force u32 *)&ipv6_hdr(skb)->saddr.s6_addr32[0];
1242 *mess++ ^= *d++; 1243 *mess++ ^= *d++;
1243 *mess++ ^= *d++; 1244 *mess++ ^= *d++;
1244 *mess++ ^= *d++; 1245 *mess++ ^= *d++;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 90824852f598..92bf9033e245 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -91,9 +91,9 @@ static unsigned int udp6_portaddr_hash(struct net *net,
91 if (ipv6_addr_any(addr6)) 91 if (ipv6_addr_any(addr6))
92 hash = jhash_1word(0, mix); 92 hash = jhash_1word(0, mix);
93 else if (ipv6_addr_v4mapped(addr6)) 93 else if (ipv6_addr_v4mapped(addr6))
94 hash = jhash_1word(addr6->s6_addr32[3], mix); 94 hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
95 else 95 else
96 hash = jhash2(addr6->s6_addr32, 4, mix); 96 hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);
97 97
98 return hash ^ port; 98 return hash ^ port;
99} 99}
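
[Reviewer note] The __force casts are sparse annotations, not behaviour changes: s6_addr32[] is __be32 while jhash wants native u32, and mixing byte orders here is deliberate since the hash only needs to be stable on one host. The annotated shape:

	#include <linux/jhash.h>
	#include <net/ipv6.h>

	static u32 demo_addr_hash(const struct in6_addr *a, u32 mix)
	{
		/* __force: big-endian words fed to an order-agnostic hash
		 * on purpose; the value is never compared across machines. */
		return jhash2((__force const u32 *)a->s6_addr32, 4, mix);
	}
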
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ae181651c75a..8c452fd5ceae 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -67,36 +67,6 @@ static int xfrm6_get_saddr(struct net *net,
67 return 0; 67 return 0;
68} 68}
69 69
70static struct dst_entry *
71__xfrm6_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
72{
73 struct dst_entry *dst;
74
75 /* Still not clear if we should set fl->fl6_{src,dst}... */
76 read_lock_bh(&policy->lock);
77 for (dst = policy->bundles; dst; dst = dst->next) {
78 struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
79 struct in6_addr fl_dst_prefix, fl_src_prefix;
80
81 ipv6_addr_prefix(&fl_dst_prefix,
82 &fl->fl6_dst,
83 xdst->u.rt6.rt6i_dst.plen);
84 ipv6_addr_prefix(&fl_src_prefix,
85 &fl->fl6_src,
86 xdst->u.rt6.rt6i_src.plen);
87 if (ipv6_addr_equal(&xdst->u.rt6.rt6i_dst.addr, &fl_dst_prefix) &&
88 ipv6_addr_equal(&xdst->u.rt6.rt6i_src.addr, &fl_src_prefix) &&
89 xfrm_bundle_ok(policy, xdst, fl, AF_INET6,
90 (xdst->u.rt6.rt6i_dst.plen != 128 ||
91 xdst->u.rt6.rt6i_src.plen != 128))) {
92 dst_clone(dst);
93 break;
94 }
95 }
96 read_unlock_bh(&policy->lock);
97 return dst;
98}
99
100static int xfrm6_get_tos(struct flowi *fl) 70static int xfrm6_get_tos(struct flowi *fl)
101{ 71{
102 return 0; 72 return 0;
@@ -291,7 +261,6 @@ static struct xfrm_policy_afinfo xfrm6_policy_afinfo = {
291 .dst_ops = &xfrm6_dst_ops, 261 .dst_ops = &xfrm6_dst_ops,
292 .dst_lookup = xfrm6_dst_lookup, 262 .dst_lookup = xfrm6_dst_lookup,
293 .get_saddr = xfrm6_get_saddr, 263 .get_saddr = xfrm6_get_saddr,
294 .find_bundle = __xfrm6_find_bundle,
295 .decode_session = _decode_session6, 264 .decode_session = _decode_session6,
296 .get_tos = xfrm6_get_tos, 265 .get_tos = xfrm6_get_tos,
297 .init_path = xfrm6_init_path, 266 .init_path = xfrm6_init_path,
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 2a4efcea3423..79986a674f6e 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -347,7 +347,7 @@ static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow)
347 self->tx_flow = flow; 347 self->tx_flow = flow;
348 IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", 348 IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n",
349 __func__); 349 __func__);
350 wake_up_interruptible(sk->sk_sleep); 350 wake_up_interruptible(sk_sleep(sk));
351 break; 351 break;
352 default: 352 default:
353 IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); 353 IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__);
@@ -900,7 +900,7 @@ static int irda_accept(struct socket *sock, struct socket *newsock, int flags)
900 if (flags & O_NONBLOCK) 900 if (flags & O_NONBLOCK)
901 goto out; 901 goto out;
902 902
903 err = wait_event_interruptible(*(sk->sk_sleep), 903 err = wait_event_interruptible(*(sk_sleep(sk)),
904 skb_peek(&sk->sk_receive_queue)); 904 skb_peek(&sk->sk_receive_queue));
905 if (err) 905 if (err)
906 goto out; 906 goto out;
@@ -1066,7 +1066,7 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
1066 goto out; 1066 goto out;
1067 1067
1068 err = -ERESTARTSYS; 1068 err = -ERESTARTSYS;
1069 if (wait_event_interruptible(*(sk->sk_sleep), 1069 if (wait_event_interruptible(*(sk_sleep(sk)),
1070 (sk->sk_state != TCP_SYN_SENT))) 1070 (sk->sk_state != TCP_SYN_SENT)))
1071 goto out; 1071 goto out;
1072 1072
@@ -1318,7 +1318,7 @@ static int irda_sendmsg(struct kiocb *iocb, struct socket *sock,
1318 1318
1319 /* Check if IrTTP wants us to slow down */ 1319 /* Check if IrTTP wants us to slow down */
1320 1320
1321 if (wait_event_interruptible(*(sk->sk_sleep), 1321 if (wait_event_interruptible(*(sk_sleep(sk)),
1322 (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { 1322 (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) {
1323 err = -ERESTARTSYS; 1323 err = -ERESTARTSYS;
1324 goto out; 1324 goto out;
@@ -1477,7 +1477,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1477 if (copied >= target) 1477 if (copied >= target)
1478 break; 1478 break;
1479 1479
1480 prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1480 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1481 1481
1482 /* 1482 /*
1483 * POSIX 1003.1g mandates this order. 1483 * POSIX 1003.1g mandates this order.
@@ -1497,7 +1497,7 @@ static int irda_recvmsg_stream(struct kiocb *iocb, struct socket *sock,
1497 /* Wait process until data arrives */ 1497 /* Wait process until data arrives */
1498 schedule(); 1498 schedule();
1499 1499
1500 finish_wait(sk->sk_sleep, &wait); 1500 finish_wait(sk_sleep(sk), &wait);
1501 1501
1502 if (err) 1502 if (err)
1503 goto out; 1503 goto out;
@@ -1787,7 +1787,7 @@ static unsigned int irda_poll(struct file * file, struct socket *sock,
1787 IRDA_DEBUG(4, "%s()\n", __func__); 1787 IRDA_DEBUG(4, "%s()\n", __func__);
1788 1788
1789 lock_kernel(); 1789 lock_kernel();
1790 poll_wait(file, sk->sk_sleep, wait); 1790 poll_wait(file, sk_sleep(sk), wait);
1791 mask = 0; 1791 mask = 0;
1792 1792
1793 /* Exceptional events? */ 1793 /* Exceptional events? */
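
The af_irda changes above (and the af_iucv ones below) are mechanical: every direct sk->sk_sleep dereference becomes a call through an accessor. Judging from these call sites, the helper is at this point a trivial wrapper along these lines (a sketch, not necessarily the verbatim net/sock.h definition):

	static inline wait_queue_head_t *sk_sleep(struct sock *sk)
	{
		return sk->sk_sleep;
	}

Funnelling all readers through one helper lets the field itself be moved or replaced later without touching every protocol again.
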
diff --git a/net/irda/ircomm/ircomm_param.c b/net/irda/ircomm/ircomm_param.c
index e2e893b474e9..8b915f3ac3b9 100644
--- a/net/irda/ircomm/ircomm_param.c
+++ b/net/irda/ircomm/ircomm_param.c
@@ -475,7 +475,7 @@ static int ircomm_param_dce(void *instance, irda_param_t *param, int get)
475 /* Check if any of the settings have changed */ 475 /* Check if any of the settings have changed */
476 if (dce & 0x0f) { 476 if (dce & 0x0f) {
477 if (dce & IRCOMM_DELTA_CTS) { 477 if (dce & IRCOMM_DELTA_CTS) {
478 IRDA_DEBUG(2, "%s(), CTS \n", __func__ ); 478 IRDA_DEBUG(2, "%s(), CTS\n", __func__ );
479 } 479 }
480 } 480 }
481 481
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index c18286a2167b..9636b7d27b48 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -59,7 +59,7 @@ do { \
59 DEFINE_WAIT(__wait); \ 59 DEFINE_WAIT(__wait); \
60 long __timeo = timeo; \ 60 long __timeo = timeo; \
61 ret = 0; \ 61 ret = 0; \
62 prepare_to_wait(sk->sk_sleep, &__wait, TASK_INTERRUPTIBLE); \ 62 prepare_to_wait(sk_sleep(sk), &__wait, TASK_INTERRUPTIBLE); \
63 while (!(condition)) { \ 63 while (!(condition)) { \
64 if (!__timeo) { \ 64 if (!__timeo) { \
65 ret = -EAGAIN; \ 65 ret = -EAGAIN; \
@@ -76,7 +76,7 @@ do { \
76 if (ret) \ 76 if (ret) \
77 break; \ 77 break; \
78 } \ 78 } \
79 finish_wait(sk->sk_sleep, &__wait); \ 79 finish_wait(sk_sleep(sk), &__wait); \
80} while (0) 80} while (0)
81 81
82#define iucv_sock_wait(sk, condition, timeo) \ 82#define iucv_sock_wait(sk, condition, timeo) \
@@ -307,7 +307,7 @@ static void iucv_sock_wake_msglim(struct sock *sk)
307{ 307{
308 read_lock(&sk->sk_callback_lock); 308 read_lock(&sk->sk_callback_lock);
309 if (sk_has_sleeper(sk)) 309 if (sk_has_sleeper(sk))
310 wake_up_interruptible_all(sk->sk_sleep); 310 wake_up_interruptible_all(sk_sleep(sk));
311 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 311 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
312 read_unlock(&sk->sk_callback_lock); 312 read_unlock(&sk->sk_callback_lock);
313} 313}
@@ -795,7 +795,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
795 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); 795 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
796 796
797 /* Wait for an incoming connection */ 797 /* Wait for an incoming connection */
798 add_wait_queue_exclusive(sk->sk_sleep, &wait); 798 add_wait_queue_exclusive(sk_sleep(sk), &wait);
799 while (!(nsk = iucv_accept_dequeue(sk, newsock))) { 799 while (!(nsk = iucv_accept_dequeue(sk, newsock))) {
800 set_current_state(TASK_INTERRUPTIBLE); 800 set_current_state(TASK_INTERRUPTIBLE);
801 if (!timeo) { 801 if (!timeo) {
@@ -819,7 +819,7 @@ static int iucv_sock_accept(struct socket *sock, struct socket *newsock,
819 } 819 }
820 820
821 set_current_state(TASK_RUNNING); 821 set_current_state(TASK_RUNNING);
822 remove_wait_queue(sk->sk_sleep, &wait); 822 remove_wait_queue(sk_sleep(sk), &wait);
823 823
824 if (err) 824 if (err)
825 goto done; 825 goto done;
@@ -1269,7 +1269,7 @@ unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
1269 struct sock *sk = sock->sk; 1269 struct sock *sk = sock->sk;
1270 unsigned int mask = 0; 1270 unsigned int mask = 0;
1271 1271
1272 sock_poll_wait(file, sk->sk_sleep, wait); 1272 sock_poll_wait(file, sk_sleep(sk), wait);
1273 1273
1274 if (sk->sk_state == IUCV_LISTEN) 1274 if (sk->sk_state == IUCV_LISTEN)
1275 return iucv_accept_poll(sk); 1275 return iucv_accept_poll(sk);
diff --git a/net/l2tp/Kconfig b/net/l2tp/Kconfig
new file mode 100644
index 000000000000..4b1e71751e10
--- /dev/null
+++ b/net/l2tp/Kconfig
@@ -0,0 +1,107 @@
1#
2# Layer Two Tunneling Protocol (L2TP)
3#
4
5menuconfig L2TP
6 tristate "Layer Two Tunneling Protocol (L2TP)"
7 depends on INET
8 ---help---
9 Layer Two Tunneling Protocol
10
11 From RFC 2661 <http://www.ietf.org/rfc/rfc2661.txt>.
12
13 L2TP facilitates the tunneling of packets across an
14 intervening network in a way that is as transparent as
15 possible to both end-users and applications.
16
17 L2TP is often used to tunnel PPP traffic over IP
18 tunnels. One IP tunnel may carry thousands of individual PPP
19 connections. L2TP is also used as a VPN protocol, popular
20 with home workers to connect to their offices.
21
22 L2TPv3 allows other protocols as well as PPP to be carried
23 over L2TP tunnels. L2TPv3 is defined in RFC 3931
24 <http://www.ietf.org/rfc/rfc3931.txt>.
25
26 The kernel component handles only L2TP data packets: a
27 userland daemon handles the L2TP control protocol (tunnel
28 and session setup). One such daemon is OpenL2TP
29 (http://openl2tp.org/).
30
31 If you don't need L2TP, say N. To compile all L2TP code as
32 modules, choose M here.
33
34config L2TP_DEBUGFS
35 tristate "L2TP debugfs support"
36 depends on L2TP && DEBUG_FS
37 help
38 Support for an l2tp directory in the debugfs filesystem. This may be
39 used to dump internal state of the l2tp drivers for problem
40 analysis.
41
42 If unsure, say 'Y'.
43
44 To compile this driver as a module, choose M here. The module
45 will be called l2tp_debugfs.
46
47config L2TP_V3
48 bool "L2TPv3 support (EXPERIMENTAL)"
49 depends on EXPERIMENTAL && L2TP
50 help
51 Layer Two Tunneling Protocol Version 3
52
53 From RFC 3931 <http://www.ietf.org/rfc/rfc3931.txt>.
54
55 The Layer Two Tunneling Protocol (L2TP) provides a dynamic
56 mechanism for tunneling Layer 2 (L2) "circuits" across a
57 packet-oriented data network (e.g., over IP). L2TP, as
58 originally defined in RFC 2661, is a standard method for
59 tunneling Point-to-Point Protocol (PPP) [RFC1661] sessions.
60 L2TP has since been adopted for tunneling a number of other
61 L2 protocols, including ATM, Frame Relay, HDLC and even raw
62 ethernet frames.
63
64 If you are connecting to L2TPv3 equipment, or you want to
65 tunnel raw ethernet frames using L2TP, say Y here. If
66 unsure, say N.
67
68config L2TP_IP
69 tristate "L2TP IP encapsulation for L2TPv3"
70 depends on L2TP_V3
71 help
72 Support for L2TP-over-IP socket family.
73
74 The L2TPv3 protocol defines two possible encapsulations for
75 L2TP frames, namely UDP and plain IP (without UDP). This
76 driver provides a new L2TPIP socket family with which
77 userspace L2TPv3 daemons may create L2TP/IP tunnel sockets
78 when UDP encapsulation is not required. When L2TP is carried
79 in IP packets, it uses IP protocol number 115, so this protocol
80 must be enabled in firewalls.
81
82 To compile this driver as a module, choose M here. The module
83 will be called l2tp_ip.
84
85config L2TP_ETH
86 tristate "L2TP ethernet pseudowire support for L2TPv3"
87 depends on L2TP_V3
88 help
89 Support for carrying raw ethernet frames over L2TPv3.
90
91 From RFC 4719 <http://www.ietf.org/rfc/rfc4719.txt>.
92
93 The Layer 2 Tunneling Protocol, Version 3 (L2TPv3) can be
94 used as a control protocol and for data encapsulation to set
95 up Pseudowires for transporting layer 2 Packet Data Units
96 across an IP network [RFC3931].
97
98 This driver provides an ethernet virtual interface for each
99 L2TP ethernet pseudowire instance. Standard Linux tools may
100 be used to assign an IP address to the local virtual
101 interface, or add the interface to a bridge.
102
103 If you are using L2TPv3, you will almost certainly want to
104 enable this option.
105
106 To compile this driver as a module, choose M here. The module
107 will be called l2tp_eth.
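
To make the L2TP_IP help text concrete, here is a hypothetical userspace fragment showing how an L2TPv3 daemon might select the L2TPIP socket family by protocol number (the 115 mentioned above); the helper name and the fallback #define are illustrative only:

	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef IPPROTO_L2TP
	#define IPPROTO_L2TP 115	/* L2TPv3 over plain IP */
	#endif

	/* Open an L2TP/IP tunnel socket; returns an fd, or -1 on error. */
	int open_l2tpip_socket(void)
	{
		return socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
	}
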
diff --git a/net/l2tp/Makefile b/net/l2tp/Makefile
new file mode 100644
index 000000000000..110e7bc2de5e
--- /dev/null
+++ b/net/l2tp/Makefile
@@ -0,0 +1,12 @@
1#
2# Makefile for the L2TP subsystem.
3#
4
5obj-$(CONFIG_L2TP) += l2tp_core.o
6
7# Build l2tp as modules if L2TP is M
8obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_PPPOL2TP)) += l2tp_ppp.o
9obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_IP)) += l2tp_ip.o
10obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_V3)) += l2tp_netlink.o
11obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_ETH)) += l2tp_eth.o
12obj-$(subst y,$(CONFIG_L2TP),$(CONFIG_L2TP_DEBUGFS)) += l2tp_debugfs.o
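
The $(subst ...) idiom clamps each sub-option to the tristate of the core module: the dependent option's value has its "y" replaced by $(CONFIG_L2TP), so with CONFIG_L2TP=m a built-in (y) sub-option still lands in obj-m rather than obj-y, and no L2TP sub-module can be more built-in than l2tp_core itself.
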
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
new file mode 100644
index 000000000000..ecc7aea9efe4
--- /dev/null
+++ b/net/l2tp/l2tp_core.c
@@ -0,0 +1,1693 @@
1/*
2 * L2TP core.
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * This file contains some code of the original L2TPv2 pppol2tp
7 * driver, which has the following copyright:
8 *
9 * Authors: Martijn van Oosterhout <kleptog@svana.org>
10 * James Chapman (jchapman@katalix.com)
11 * Contributors:
12 * Michal Ostrowski <mostrows@speakeasy.net>
13 * Arnaldo Carvalho de Melo <acme@xconectiva.com.br>
14 * David S. Miller (davem@redhat.com)
15 *
16 * This program is free software; you can redistribute it and/or modify
17 * it under the terms of the GNU General Public License version 2 as
18 * published by the Free Software Foundation.
19 */
20
21#include <linux/module.h>
22#include <linux/string.h>
23#include <linux/list.h>
24#include <linux/rculist.h>
25#include <linux/uaccess.h>
26
27#include <linux/kernel.h>
28#include <linux/spinlock.h>
29#include <linux/kthread.h>
30#include <linux/sched.h>
31#include <linux/slab.h>
32#include <linux/errno.h>
33#include <linux/jiffies.h>
34
35#include <linux/netdevice.h>
36#include <linux/net.h>
37#include <linux/inetdevice.h>
38#include <linux/skbuff.h>
39#include <linux/init.h>
40#include <linux/in.h>
41#include <linux/ip.h>
42#include <linux/udp.h>
43#include <linux/l2tp.h>
44#include <linux/hash.h>
45#include <linux/sort.h>
46#include <linux/file.h>
47#include <linux/nsproxy.h>
48#include <net/net_namespace.h>
49#include <net/netns/generic.h>
50#include <net/dst.h>
51#include <net/ip.h>
52#include <net/udp.h>
53#include <net/inet_common.h>
54#include <net/xfrm.h>
55#include <net/protocol.h>
56
57#include <asm/byteorder.h>
58#include <asm/atomic.h>
59
60#include "l2tp_core.h"
61
62#define L2TP_DRV_VERSION "V2.0"
63
64/* L2TP header constants */
65#define L2TP_HDRFLAG_T 0x8000
66#define L2TP_HDRFLAG_L 0x4000
67#define L2TP_HDRFLAG_S 0x0800
68#define L2TP_HDRFLAG_O 0x0200
69#define L2TP_HDRFLAG_P 0x0100
70
71#define L2TP_HDR_VER_MASK 0x000F
72#define L2TP_HDR_VER_2 0x0002
73#define L2TP_HDR_VER_3 0x0003
74
75/* L2TPv3 default L2-specific sublayer */
76#define L2TP_SLFLAG_S 0x40000000
77#define L2TP_SL_SEQ_MASK 0x00ffffff
78
79#define L2TP_HDR_SIZE_SEQ 10
80#define L2TP_HDR_SIZE_NOSEQ 6
81
82/* Default trace flags */
83#define L2TP_DEFAULT_DEBUG_FLAGS 0
84
85#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
86 do { \
87 if ((_mask) & (_type)) \
88 printk(_lvl "L2TP: " _fmt, ##args); \
89 } while (0)
90
91/* Private data stored for received packets in the skb.
92 */
93struct l2tp_skb_cb {
94 u32 ns;
95 u16 has_seq;
96 u16 length;
97 unsigned long expires;
98};
99
100#define L2TP_SKB_CB(skb) ((struct l2tp_skb_cb *) &skb->cb[sizeof(struct inet_skb_parm)])
101
102static atomic_t l2tp_tunnel_count;
103static atomic_t l2tp_session_count;
104
105/* per-net private data for this module */
106static unsigned int l2tp_net_id;
107struct l2tp_net {
108 struct list_head l2tp_tunnel_list;
109 spinlock_t l2tp_tunnel_list_lock;
110 struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2];
111 spinlock_t l2tp_session_hlist_lock;
112};
113
114static inline struct l2tp_net *l2tp_pernet(struct net *net)
115{
116 BUG_ON(!net);
117
118 return net_generic(net, l2tp_net_id);
119}
120
121/* Session hash global list for L2TPv3.
122 * The session_id SHOULD be random according to RFC3931, but several
123 * L2TP implementations use incrementing session_ids. So we do a real
124 * hash on the session_id, rather than a simple bitmask.
125 */
126static inline struct hlist_head *
127l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id)
128{
129 return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)];
130
131}
132
133/* Lookup a session by id in the global session list
134 */
135static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id)
136{
137 struct l2tp_net *pn = l2tp_pernet(net);
138 struct hlist_head *session_list =
139 l2tp_session_id_hash_2(pn, session_id);
140 struct l2tp_session *session;
141 struct hlist_node *walk;
142
143 rcu_read_lock_bh();
144 hlist_for_each_entry_rcu(session, walk, session_list, global_hlist) {
145 if (session->session_id == session_id) {
146 rcu_read_unlock_bh();
147 return session;
148 }
149 }
150 rcu_read_unlock_bh();
151
152 return NULL;
153}
154
155/* Session hash list.
156 * The session_id SHOULD be random according to RFC2661, but several
157 * L2TP implementations (Cisco and Microsoft) use incrementing
158 * session_ids. So we do a real hash on the session_id, rather than a
159 * simple bitmask.
160 */
161static inline struct hlist_head *
162l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id)
163{
164 return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)];
165}
166
167/* Lookup a session by id
168 */
169struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id)
170{
171 struct hlist_head *session_list;
172 struct l2tp_session *session;
173 struct hlist_node *walk;
174
175 /* In L2TPv3, session_ids are unique over all tunnels and we
176 * sometimes need to look them up before we know the
177 * tunnel.
178 */
179 if (tunnel == NULL)
180 return l2tp_session_find_2(net, session_id);
181
182 session_list = l2tp_session_id_hash(tunnel, session_id);
183 read_lock_bh(&tunnel->hlist_lock);
184 hlist_for_each_entry(session, walk, session_list, hlist) {
185 if (session->session_id == session_id) {
186 read_unlock_bh(&tunnel->hlist_lock);
187 return session;
188 }
189 }
190 read_unlock_bh(&tunnel->hlist_lock);
191
192 return NULL;
193}
194EXPORT_SYMBOL_GPL(l2tp_session_find);
195
196struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth)
197{
198 int hash;
199 struct hlist_node *walk;
200 struct l2tp_session *session;
201 int count = 0;
202
203 read_lock_bh(&tunnel->hlist_lock);
204 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
205 hlist_for_each_entry(session, walk, &tunnel->session_hlist[hash], hlist) {
206 if (++count > nth) {
207 read_unlock_bh(&tunnel->hlist_lock);
208 return session;
209 }
210 }
211 }
212
213 read_unlock_bh(&tunnel->hlist_lock);
214
215 return NULL;
216}
217EXPORT_SYMBOL_GPL(l2tp_session_find_nth);
218
219/* Lookup a session by interface name.
220 * This is very inefficient but is only used by management interfaces.
221 */
222struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname)
223{
224 struct l2tp_net *pn = l2tp_pernet(net);
225 int hash;
226 struct hlist_node *walk;
227 struct l2tp_session *session;
228
229 rcu_read_lock_bh();
230 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) {
231 hlist_for_each_entry_rcu(session, walk, &pn->l2tp_session_hlist[hash], global_hlist) {
232 if (!strcmp(session->ifname, ifname)) {
233 rcu_read_unlock_bh();
234 return session;
235 }
236 }
237 }
238
239 rcu_read_unlock_bh();
240
241 return NULL;
242}
243EXPORT_SYMBOL_GPL(l2tp_session_find_by_ifname);
244
245/* Lookup a tunnel by id
246 */
247struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id)
248{
249 struct l2tp_tunnel *tunnel;
250 struct l2tp_net *pn = l2tp_pernet(net);
251
252 rcu_read_lock_bh();
253 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
254 if (tunnel->tunnel_id == tunnel_id) {
255 rcu_read_unlock_bh();
256 return tunnel;
257 }
258 }
259 rcu_read_unlock_bh();
260
261 return NULL;
262}
263EXPORT_SYMBOL_GPL(l2tp_tunnel_find);
264
265struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth)
266{
267 struct l2tp_net *pn = l2tp_pernet(net);
268 struct l2tp_tunnel *tunnel;
269 int count = 0;
270
271 rcu_read_lock_bh();
272 list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list) {
273 if (++count > nth) {
274 rcu_read_unlock_bh();
275 return tunnel;
276 }
277 }
278
279 rcu_read_unlock_bh();
280
281 return NULL;
282}
283EXPORT_SYMBOL_GPL(l2tp_tunnel_find_nth);
284
285/*****************************************************************************
286 * Receive data handling
287 *****************************************************************************/
288
289/* Queue a skb in order. We come here only if the skb has an L2TP sequence
290 * number.
291 */
292static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *skb)
293{
294 struct sk_buff *skbp;
295 struct sk_buff *tmp;
296 u32 ns = L2TP_SKB_CB(skb)->ns;
297
298 spin_lock_bh(&session->reorder_q.lock);
299 skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {
300 if (L2TP_SKB_CB(skbp)->ns > ns) {
301 __skb_queue_before(&session->reorder_q, skbp, skb);
302 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
303 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",
304 session->name, ns, L2TP_SKB_CB(skbp)->ns,
305 skb_queue_len(&session->reorder_q));
306 session->stats.rx_oos_packets++;
307 goto out;
308 }
309 }
310
311 __skb_queue_tail(&session->reorder_q, skb);
312
313out:
314 spin_unlock_bh(&session->reorder_q.lock);
315}
316
317/* Dequeue a single skb.
318 */
319static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *skb)
320{
321 struct l2tp_tunnel *tunnel = session->tunnel;
322 int length = L2TP_SKB_CB(skb)->length;
323
324 /* We're about to requeue the skb, so return resources
325 * to its current owner (a socket receive buffer).
326 */
327 skb_orphan(skb);
328
329 tunnel->stats.rx_packets++;
330 tunnel->stats.rx_bytes += length;
331 session->stats.rx_packets++;
332 session->stats.rx_bytes += length;
333
334 if (L2TP_SKB_CB(skb)->has_seq) {
335 /* Bump our Nr */
336 session->nr++;
337 if (tunnel->version == L2TP_HDR_VER_2)
338 session->nr &= 0xffff;
339 else
340 session->nr &= 0xffffff;
341
342 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
343 "%s: updated nr to %hu\n", session->name, session->nr);
344 }
345
346 /* call private receive handler */
347 if (session->recv_skb != NULL)
348 (*session->recv_skb)(session, skb, L2TP_SKB_CB(skb)->length);
349 else
350 kfree_skb(skb);
351
352 if (session->deref)
353 (*session->deref)(session);
354}
355
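
The masking above reflects the different sequence-number widths of the two protocol versions: L2TPv2 carries 16-bit Ns/Nr fields, whereas the L2TPv3 default L2-specific sublayer carries a 24-bit sequence number. As a standalone sketch (helper name invented for illustration):

	static u32 l2tp_next_nr(u32 nr, int version)
	{
		nr++;
		if (version == L2TP_HDR_VER_2)
			return nr & 0xffff;	/* 16-bit Ns/Nr in L2TPv2 */

		return nr & 0xffffff;	/* 24-bit L2TPv3 sublayer counter */
	}
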
356/* Dequeue skbs from the session's reorder_q, subject to packet order.
357 * Skbs that have been in the queue for too long are simply discarded.
358 */
359static void l2tp_recv_dequeue(struct l2tp_session *session)
360{
361 struct sk_buff *skb;
362 struct sk_buff *tmp;
363
364 /* If the pkt at the head of the queue has the nr that we
365 * expect to send up next, dequeue it and any other
366 * in-sequence packets behind it.
367 */
368 spin_lock_bh(&session->reorder_q.lock);
369 skb_queue_walk_safe(&session->reorder_q, skb, tmp) {
370 if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) {
371 session->stats.rx_seq_discards++;
372 session->stats.rx_errors++;
373 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
374 "%s: oos pkt %u len %d discarded (too old), "
375 "waiting for %u, reorder_q_len=%d\n",
376 session->name, L2TP_SKB_CB(skb)->ns,
377 L2TP_SKB_CB(skb)->length, session->nr,
378 skb_queue_len(&session->reorder_q));
379 __skb_unlink(skb, &session->reorder_q);
380 kfree_skb(skb);
381 if (session->deref)
382 (*session->deref)(session);
383 continue;
384 }
385
386 if (L2TP_SKB_CB(skb)->has_seq) {
387 if (L2TP_SKB_CB(skb)->ns != session->nr) {
388 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
389 "%s: holding oos pkt %u len %d, "
390 "waiting for %u, reorder_q_len=%d\n",
391 session->name, L2TP_SKB_CB(skb)->ns,
392 L2TP_SKB_CB(skb)->length, session->nr,
393 skb_queue_len(&session->reorder_q));
394 goto out;
395 }
396 }
397 __skb_unlink(skb, &session->reorder_q);
398
399 /* Process the skb. We release the queue lock while we
400 * do so to let other contexts process the queue.
401 */
402 spin_unlock_bh(&session->reorder_q.lock);
403 l2tp_recv_dequeue_skb(session, skb);
404 spin_lock_bh(&session->reorder_q.lock);
405 }
406
407out:
408 spin_unlock_bh(&session->reorder_q.lock);
409}
410
411static inline int l2tp_verify_udp_checksum(struct sock *sk,
412 struct sk_buff *skb)
413{
414 struct udphdr *uh = udp_hdr(skb);
415 u16 ulen = ntohs(uh->len);
416 struct inet_sock *inet;
417 __wsum psum;
418
419 if (sk->sk_no_check || skb_csum_unnecessary(skb) || !uh->check)
420 return 0;
421
422 inet = inet_sk(sk);
423 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
424 IPPROTO_UDP, 0);
425
426 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
427 !csum_fold(csum_add(psum, skb->csum)))
428 return 0;
429
430 skb->csum = psum;
431
432 return __skb_checksum_complete(skb);
433}
434
435/* Do receive processing of L2TP data frames. We handle both L2TPv2
436 * and L2TPv3 data frames here.
437 *
438 * L2TPv2 Data Message Header
439 *
440 * 0 1 2 3
441 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
442 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
443 * |T|L|x|x|S|x|O|P|x|x|x|x| Ver | Length (opt) |
444 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
445 * | Tunnel ID | Session ID |
446 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
447 * | Ns (opt) | Nr (opt) |
448 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
449 * | Offset Size (opt) | Offset pad... (opt)
450 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
451 *
452 * Data frames are marked by T=0. All other fields are the same as
453 * those in L2TP control frames.
454 *
455 * L2TPv3 Data Message Header
456 *
457 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
458 * | L2TP Session Header |
459 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
460 * | L2-Specific Sublayer |
461 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
462 * | Tunnel Payload ...
463 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
464 *
465 * L2TPv3 Session Header Over IP
466 *
467 * 0 1 2 3
468 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
469 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
470 * | Session ID |
471 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
472 * | Cookie (optional, maximum 64 bits)...
473 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
474 * |
475 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
476 *
477 * L2TPv3 L2-Specific Sublayer Format
478 *
479 * 0 1 2 3
480 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
481 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
482 * |x|S|x|x|x|x|x|x| Sequence Number |
483 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
484 *
485 * Cookie value, sublayer format and offset (pad) are negotiated with
486 * the peer when the session is set up. Unlike L2TPv2, we do not need
487 * to parse the packet header to determine if optional fields are
488 * present.
489 *
490 * Caller must already have parsed the frame and determined that it is
491 * a data (not control) frame before coming here. Fields up to the
492 * session-id have already been parsed and ptr points to the data
493 * after the session-id.
494 */
495void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
496 unsigned char *ptr, unsigned char *optr, u16 hdrflags,
497 int length, int (*payload_hook)(struct sk_buff *skb))
498{
499 struct l2tp_tunnel *tunnel = session->tunnel;
500 int offset;
501 u32 ns, nr;
502
503 /* The ref count is increased since we now hold a pointer to
504 * the session. Take care to decrement the refcnt when exiting
505 * this function from now on...
506 */
507 l2tp_session_inc_refcount(session);
508 if (session->ref)
509 (*session->ref)(session);
510
511 /* Parse and check optional cookie */
512 if (session->peer_cookie_len > 0) {
513 if (memcmp(ptr, &session->peer_cookie[0], session->peer_cookie_len)) {
514 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
515 "%s: cookie mismatch (%u/%u). Discarding.\n",
516 tunnel->name, tunnel->tunnel_id, session->session_id);
517 session->stats.rx_cookie_discards++;
518 goto discard;
519 }
520 ptr += session->peer_cookie_len;
521 }
522
523 /* Handle the optional sequence numbers. Sequence numbers are
524 * in different places for L2TPv2 and L2TPv3.
525 *
526 * If we are the LAC, enable/disable sequence numbers under
527 * the control of the LNS. If no sequence numbers present but
528 * we were expecting them, discard frame.
529 */
530 ns = nr = 0;
531 L2TP_SKB_CB(skb)->has_seq = 0;
532 if (tunnel->version == L2TP_HDR_VER_2) {
533 if (hdrflags & L2TP_HDRFLAG_S) {
534 ns = ntohs(*(__be16 *) ptr);
535 ptr += 2;
536 nr = ntohs(*(__be16 *) ptr);
537 ptr += 2;
538
539 /* Store L2TP info in the skb */
540 L2TP_SKB_CB(skb)->ns = ns;
541 L2TP_SKB_CB(skb)->has_seq = 1;
542
543 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
544 "%s: recv data ns=%u, nr=%u, session nr=%u\n",
545 session->name, ns, nr, session->nr);
546 }
547 } else if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
548 u32 l2h = ntohl(*(__be32 *) ptr);
549
550 if (l2h & 0x40000000) {
551 ns = l2h & 0x00ffffff;
552
553 /* Store L2TP info in the skb */
554 L2TP_SKB_CB(skb)->ns = ns;
555 L2TP_SKB_CB(skb)->has_seq = 1;
556
557 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
558 "%s: recv data ns=%u, session nr=%u\n",
559 session->name, ns, session->nr);
560 }
561 }
562
563 /* Advance past L2-specific header, if present */
564 ptr += session->l2specific_len;
565
566 if (L2TP_SKB_CB(skb)->has_seq) {
567 /* Received a packet with sequence numbers. If we're the LNS,
568 * check if we are sending sequence numbers and, if not,
569 * start doing so.
570 */
571 if ((!session->lns_mode) && (!session->send_seq)) {
572 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
573 "%s: requested to enable seq numbers by LNS\n",
574 session->name);
575 session->send_seq = -1;
576 l2tp_session_set_header_len(session, tunnel->version);
577 }
578 } else {
579 /* No sequence numbers.
580 * If user has configured mandatory sequence numbers, discard.
581 */
582 if (session->recv_seq) {
583 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
584 "%s: recv data has no seq numbers when required. "
585 "Discarding\n", session->name);
586 session->stats.rx_seq_discards++;
587 goto discard;
588 }
589
590 /* If we're the LAC and we're sending sequence numbers, the
591 * LNS has requested that we no longer send sequence numbers.
592 * If we're the LNS and we're sending sequence numbers, the
593 * LAC is broken. Discard the frame.
594 */
595 if ((!session->lns_mode) && (session->send_seq)) {
596 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_INFO,
597 "%s: requested to disable seq numbers by LNS\n",
598 session->name);
599 session->send_seq = 0;
600 l2tp_session_set_header_len(session, tunnel->version);
601 } else if (session->send_seq) {
602 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_WARNING,
603 "%s: recv data has no seq numbers when required. "
604 "Discarding\n", session->name);
605 session->stats.rx_seq_discards++;
606 goto discard;
607 }
608 }
609
610 /* Session data offset is handled differently for L2TPv2 and
611 * L2TPv3. For L2TPv2, there is an optional 16-bit value in
612 * the header. For L2TPv3, the offset is negotiated using AVPs
613 * in the session setup control protocol.
614 */
615 if (tunnel->version == L2TP_HDR_VER_2) {
616 /* If offset bit set, skip it. */
617 if (hdrflags & L2TP_HDRFLAG_O) {
618 offset = ntohs(*(__be16 *)ptr);
619 ptr += 2 + offset;
620 }
621 } else
622 ptr += session->offset;
623
624 offset = ptr - optr;
625 if (!pskb_may_pull(skb, offset))
626 goto discard;
627
628 __skb_pull(skb, offset);
629
630 /* If caller wants to process the payload before we queue the
631 * packet, do so now.
632 */
633 if (payload_hook)
634 if ((*payload_hook)(skb))
635 goto discard;
636
637 /* Prepare skb for adding to the session's reorder_q. Hold
638 * packets for max reorder_timeout or 1 second if not
639 * reordering.
640 */
641 L2TP_SKB_CB(skb)->length = length;
642 L2TP_SKB_CB(skb)->expires = jiffies +
643 (session->reorder_timeout ? session->reorder_timeout : HZ);
644
645 /* Add packet to the session's receive queue. Reordering is done here, if
646 * enabled. Saved L2TP protocol info is stored in skb->cb[].
647 */
648 if (L2TP_SKB_CB(skb)->has_seq) {
649 if (session->reorder_timeout != 0) {
650 /* Packet reordering enabled. Add skb to session's
651 * reorder queue, in order of ns.
652 */
653 l2tp_recv_queue_skb(session, skb);
654 } else {
655 /* Packet reordering disabled. Discard out-of-sequence
656 * packets
657 */
658 if (L2TP_SKB_CB(skb)->ns != session->nr) {
659 session->stats.rx_seq_discards++;
660 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
661 "%s: oos pkt %u len %d discarded, "
662 "waiting for %u, reorder_q_len=%d\n",
663 session->name, L2TP_SKB_CB(skb)->ns,
664 L2TP_SKB_CB(skb)->length, session->nr,
665 skb_queue_len(&session->reorder_q));
666 goto discard;
667 }
668 skb_queue_tail(&session->reorder_q, skb);
669 }
670 } else {
671 /* No sequence numbers. Add the skb to the tail of the
672 * reorder queue. This ensures that it will be
673 * delivered after all previous sequenced skbs.
674 */
675 skb_queue_tail(&session->reorder_q, skb);
676 }
677
678 /* Try to dequeue as many skbs from reorder_q as we can. */
679 l2tp_recv_dequeue(session);
680
681 l2tp_session_dec_refcount(session);
682
683 return;
684
685discard:
686 session->stats.rx_errors++;
687 kfree_skb(skb);
688
689 if (session->deref)
690 (*session->deref)(session);
691
692 l2tp_session_dec_refcount(session);
693}
694EXPORT_SYMBOL(l2tp_recv_common);
695
696/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame
697 * here. The skb is not on a list when we get here.
698 * Returns 0 if the packet was a data packet and was successfully passed on.
699 * Returns 1 if the packet was not a good data packet and could not be
700 * forwarded. All such packets are passed up to userspace to deal with.
701 */
702int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,
703 int (*payload_hook)(struct sk_buff *skb))
704{
705 struct l2tp_session *session = NULL;
706 unsigned char *ptr, *optr;
707 u16 hdrflags;
708 u32 tunnel_id, session_id;
709 int offset;
710 u16 version;
711 int length;
712
713 if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))
714 goto discard_bad_csum;
715
716 /* UDP always verifies the packet length. */
717 __skb_pull(skb, sizeof(struct udphdr));
718
719 /* Short packet? */
720 if (!pskb_may_pull(skb, L2TP_HDR_SIZE_SEQ)) {
721 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
722 "%s: recv short packet (len=%d)\n", tunnel->name, skb->len);
723 goto error;
724 }
725
726 /* Point to L2TP header */
727 optr = ptr = skb->data;
728
729 /* Trace packet contents, if enabled */
730 if (tunnel->debug & L2TP_MSG_DATA) {
731 length = min(32u, skb->len);
732 if (!pskb_may_pull(skb, length))
733 goto error;
734
735 printk(KERN_DEBUG "%s: recv: ", tunnel->name);
736
737 offset = 0;
738 do {
739 printk(" %02X", ptr[offset]);
740 } while (++offset < length);
741
742 printk("\n");
743 }
744
745 /* Get L2TP header flags */
746 hdrflags = ntohs(*(__be16 *) ptr);
747
748 /* Check protocol version */
749 version = hdrflags & L2TP_HDR_VER_MASK;
750 if (version != tunnel->version) {
751 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
752 "%s: recv protocol version mismatch: got %d expected %d\n",
753 tunnel->name, version, tunnel->version);
754 goto error;
755 }
756
757 /* Get length of L2TP packet */
758 length = skb->len;
759
760 /* If type is control packet, it is handled by userspace. */
761 if (hdrflags & L2TP_HDRFLAG_T) {
762 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
763 "%s: recv control packet, len=%d\n", tunnel->name, length);
764 goto error;
765 }
766
767 /* Skip flags */
768 ptr += 2;
769
770 if (tunnel->version == L2TP_HDR_VER_2) {
771 /* If length is present, skip it */
772 if (hdrflags & L2TP_HDRFLAG_L)
773 ptr += 2;
774
775 /* Extract tunnel and session ID */
776 tunnel_id = ntohs(*(__be16 *) ptr);
777 ptr += 2;
778 session_id = ntohs(*(__be16 *) ptr);
779 ptr += 2;
780 } else {
781 ptr += 2; /* skip reserved bits */
782 tunnel_id = tunnel->tunnel_id;
783 session_id = ntohl(*(__be32 *) ptr);
784 ptr += 4;
785 }
786
787 /* Find the session context */
788 session = l2tp_session_find(tunnel->l2tp_net, tunnel, session_id);
789 if (!session || !session->recv_skb) {
790 /* Not found? Pass to userspace to deal with */
791 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_INFO,
792 "%s: no session found (%u/%u). Passing up.\n",
793 tunnel->name, tunnel_id, session_id);
794 goto error;
795 }
796
797 l2tp_recv_common(session, skb, ptr, optr, hdrflags, length, payload_hook);
798
799 return 0;
800
801discard_bad_csum:
802 LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);
803 UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0);
804 tunnel->stats.rx_errors++;
805 kfree_skb(skb);
806
807 return 0;
808
809error:
810 /* Put UDP header back */
811 __skb_push(skb, sizeof(struct udphdr));
812
813 return 1;
814}
815EXPORT_SYMBOL_GPL(l2tp_udp_recv_core);
816
817/* UDP encapsulation receive handler. See net/ipv4/udp.c.
818 * Return codes:
819 * 0 : success.
820 * <0: error
821 * >0: skb should be passed up to userspace as UDP.
822 */
823int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
824{
825 struct l2tp_tunnel *tunnel;
826
827 tunnel = l2tp_sock_to_tunnel(sk);
828 if (tunnel == NULL)
829 goto pass_up;
830
831 PRINTK(tunnel->debug, L2TP_MSG_DATA, KERN_DEBUG,
832 "%s: received %d bytes\n", tunnel->name, skb->len);
833
834 if (l2tp_udp_recv_core(tunnel, skb, tunnel->recv_payload_hook))
835 goto pass_up_put;
836
837 sock_put(sk);
838 return 0;
839
840pass_up_put:
841 sock_put(sk);
842pass_up:
843 return 1;
844}
845EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv);
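
For context, this handler is not reached via normal protocol dispatch; l2tp_tunnel_create() below installs it on the tunnel's UDP socket, after which udp_rcv() hands matching datagrams to it instead of queueing them to the socket. A minimal sketch of that wiring (helper name invented for illustration):

	static void l2tp_udp_encap_enable(struct sock *sk)
	{
		/* See net/ipv4/udp.c: once encap_type is non-zero, encap_rcv
		 * is consulted for every datagram arriving on this socket.
		 */
		udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
	}
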
846
847/************************************************************************
848 * Transmit handling
849 ***********************************************************************/
850
851/* Build an L2TP header for the session into the buffer provided.
852 */
853static int l2tp_build_l2tpv2_header(struct l2tp_session *session, void *buf)
854{
855 struct l2tp_tunnel *tunnel = session->tunnel;
856 __be16 *bufp = buf;
857 __be16 *optr = buf;
858 u16 flags = L2TP_HDR_VER_2;
859 u32 tunnel_id = tunnel->peer_tunnel_id;
860 u32 session_id = session->peer_session_id;
861
862 if (session->send_seq)
863 flags |= L2TP_HDRFLAG_S;
864
865 /* Setup L2TP header. */
866 *bufp++ = htons(flags);
867 *bufp++ = htons(tunnel_id);
868 *bufp++ = htons(session_id);
869 if (session->send_seq) {
870 *bufp++ = htons(session->ns);
871 *bufp++ = 0;
872 session->ns++;
873 session->ns &= 0xffff;
874 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
875 "%s: updated ns to %u\n", session->name, session->ns);
876 }
877
878 return bufp - optr;
879}
880
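
A worked example of the builder above, assuming peer_tunnel_id 7, peer_session_id 9, send_seq enabled and ns currently 1; the ten bytes emitted, in network byte order, would be:

	08 02	flags: S bit (0x0800) | version 2 (0x0002)
	00 07	tunnel ID
	00 09	session ID
	00 01	Ns (ns is then incremented and wrapped to 16 bits)
	00 00	Nr (always transmitted as zero for data frames here)
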
881static int l2tp_build_l2tpv3_header(struct l2tp_session *session, void *buf)
882{
883 struct l2tp_tunnel *tunnel = session->tunnel;
884 char *bufp = buf;
885 char *optr = bufp;
886
887 /* Setup L2TP header. The header differs slightly for UDP and
888 * IP encapsulations. For UDP, there is 4 bytes of flags.
889 */
890 if (tunnel->encap == L2TP_ENCAPTYPE_UDP) {
891 u16 flags = L2TP_HDR_VER_3;
892 *((__be16 *) bufp) = htons(flags);
893 bufp += 2;
894 *((__be16 *) bufp) = 0;
895 bufp += 2;
896 }
897
898 *((__be32 *) bufp) = htonl(session->peer_session_id);
899 bufp += 4;
900 if (session->cookie_len) {
901 memcpy(bufp, &session->cookie[0], session->cookie_len);
902 bufp += session->cookie_len;
903 }
904 if (session->l2specific_len) {
905 if (session->l2specific_type == L2TP_L2SPECTYPE_DEFAULT) {
906 u32 l2h = 0;
907 if (session->send_seq) {
908 l2h = 0x40000000 | session->ns;
909 session->ns++;
910 session->ns &= 0xffffff;
911 PRINTK(session->debug, L2TP_MSG_SEQ, KERN_DEBUG,
912 "%s: updated ns to %u\n", session->name, session->ns);
913 }
914
915 *((__be32 *) bufp) = htonl(l2h);
916 }
917 bufp += session->l2specific_len;
918 }
919 if (session->offset)
920 bufp += session->offset;
921
922 return bufp - optr;
923}
924
925int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len)
926{
927 struct l2tp_tunnel *tunnel = session->tunnel;
928 unsigned int len = skb->len;
929 int error;
930
931 /* Debug */
932 if (session->send_seq)
933 PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
934 "%s: send %Zd bytes, ns=%u\n", session->name,
935 data_len, session->ns - 1);
936 else
937 PRINTK(session->debug, L2TP_MSG_DATA, KERN_DEBUG,
938 "%s: send %Zd bytes\n", session->name, data_len);
939
940 if (session->debug & L2TP_MSG_DATA) {
941 int i;
942 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
943 unsigned char *datap = skb->data + uhlen;
944
945 printk(KERN_DEBUG "%s: xmit:", session->name);
946 for (i = 0; i < (len - uhlen); i++) {
947 printk(" %02X", *datap++);
948 if (i == 31) {
949 printk(" ...");
950 break;
951 }
952 }
953 printk("\n");
954 }
955
956 /* Queue the packet to IP for output */
957 skb->local_df = 1;
958 error = ip_queue_xmit(skb);
959
960 /* Update stats */
961 if (error >= 0) {
962 tunnel->stats.tx_packets++;
963 tunnel->stats.tx_bytes += len;
964 session->stats.tx_packets++;
965 session->stats.tx_bytes += len;
966 } else {
967 tunnel->stats.tx_errors++;
968 session->stats.tx_errors++;
969 }
970
971 return 0;
972}
973EXPORT_SYMBOL_GPL(l2tp_xmit_core);
974
975/* Automatically called when the skb is freed.
976 */
977static void l2tp_sock_wfree(struct sk_buff *skb)
978{
979 sock_put(skb->sk);
980}
981
982/* For data skbs that we transmit, we associate with the tunnel socket
983 * but don't do accounting.
984 */
985static inline void l2tp_skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
986{
987 sock_hold(sk);
988 skb->sk = sk;
989 skb->destructor = l2tp_sock_wfree;
990}
991
992/* If caller requires the skb to have a ppp header, the header must be
993 * inserted in the skb data before calling this function.
994 */
995int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len)
996{
997 int data_len = skb->len;
998 struct l2tp_tunnel *tunnel = session->tunnel;
999 struct sock *sk = tunnel->sock;
1000 struct udphdr *uh;
1001 struct inet_sock *inet;
1002 __wsum csum;
1003 int old_headroom;
1004 int new_headroom;
1005 int headroom;
1006 int uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
1007 int udp_len;
1008
1009 /* Check that there's enough headroom in the skb to insert IP,
1010 * UDP and L2TP headers. If not enough, expand it to
1011 * make room. Adjust truesize.
1012 */
1013 headroom = NET_SKB_PAD + sizeof(struct iphdr) +
1014 uhlen + hdr_len;
1015 old_headroom = skb_headroom(skb);
1016 if (skb_cow_head(skb, headroom))
1017 goto abort;
1018
1019 new_headroom = skb_headroom(skb);
1020 skb_orphan(skb);
1021 skb->truesize += new_headroom - old_headroom;
1022
1023 /* Setup L2TP header */
1024 session->build_header(session, __skb_push(skb, hdr_len));
1025
1026 /* Reset skb netfilter state */
1027 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
1028 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
1029 IPSKB_REROUTED);
1030 nf_reset(skb);
1031
1032 /* Get routing info from the tunnel socket */
1033 skb_dst_drop(skb);
1034 skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
1035
1036 switch (tunnel->encap) {
1037 case L2TP_ENCAPTYPE_UDP:
1038 /* Setup UDP header */
1039 inet = inet_sk(sk);
1040 __skb_push(skb, sizeof(*uh));
1041 skb_reset_transport_header(skb);
1042 uh = udp_hdr(skb);
1043 uh->source = inet->inet_sport;
1044 uh->dest = inet->inet_dport;
1045 udp_len = uhlen + hdr_len + data_len;
1046 uh->len = htons(udp_len);
1047 uh->check = 0;
1048
1049 /* Calculate UDP checksum if configured to do so */
1050 if (sk->sk_no_check == UDP_CSUM_NOXMIT)
1051 skb->ip_summed = CHECKSUM_NONE;
1052 else if ((skb_dst(skb) && skb_dst(skb)->dev) &&
1053 (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM))) {
1054 skb->ip_summed = CHECKSUM_COMPLETE;
1055 csum = skb_checksum(skb, 0, udp_len, 0);
1056 uh->check = csum_tcpudp_magic(inet->inet_saddr,
1057 inet->inet_daddr,
1058 udp_len, IPPROTO_UDP, csum);
1059 if (uh->check == 0)
1060 uh->check = CSUM_MANGLED_0;
1061 } else {
1062 skb->ip_summed = CHECKSUM_PARTIAL;
1063 skb->csum_start = skb_transport_header(skb) - skb->head;
1064 skb->csum_offset = offsetof(struct udphdr, check);
1065 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
1066 inet->inet_daddr,
1067 udp_len, IPPROTO_UDP, 0);
1068 }
1069 break;
1070
1071 case L2TP_ENCAPTYPE_IP:
1072 break;
1073 }
1074
1075 l2tp_skb_set_owner_w(skb, sk);
1076
1077 l2tp_xmit_core(session, skb, data_len);
1078
1079abort:
1080 return 0;
1081}
1082EXPORT_SYMBOL_GPL(l2tp_xmit_skb);
1083
1084/*****************************************************************************
1085 * Tunnel and session create/destroy.
1086 *****************************************************************************/
1087
1088/* Tunnel socket destruct hook.
1089 * The tunnel context is deleted only when all session sockets have been
1090 * closed.
1091 */
1092void l2tp_tunnel_destruct(struct sock *sk)
1093{
1094 struct l2tp_tunnel *tunnel;
1095
1096 tunnel = sk->sk_user_data;
1097 if (tunnel == NULL)
1098 goto end;
1099
1100 PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
1101 "%s: closing...\n", tunnel->name);
1102
1103 /* Close all sessions */
1104 l2tp_tunnel_closeall(tunnel);
1105
1106 switch (tunnel->encap) {
1107 case L2TP_ENCAPTYPE_UDP:
1108 /* No longer an encapsulation socket. See net/ipv4/udp.c */
1109 (udp_sk(sk))->encap_type = 0;
1110 (udp_sk(sk))->encap_rcv = NULL;
1111 break;
1112 case L2TP_ENCAPTYPE_IP:
1113 break;
1114 }
1115
1116 /* Remove hooks into tunnel socket */
1117 tunnel->sock = NULL;
1118 sk->sk_destruct = tunnel->old_sk_destruct;
1119 sk->sk_user_data = NULL;
1120
1121 /* Call the original destructor */
1122 if (sk->sk_destruct)
1123 (*sk->sk_destruct)(sk);
1124
1125 /* We're finished with the socket */
1126 l2tp_tunnel_dec_refcount(tunnel);
1127
1128end:
1129 return;
1130}
1131EXPORT_SYMBOL(l2tp_tunnel_destruct);
1132
1133/* When the tunnel is closed, all the attached sessions need to go too.
1134 */
1135void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)
1136{
1137 int hash;
1138 struct hlist_node *walk;
1139 struct hlist_node *tmp;
1140 struct l2tp_session *session;
1141
1142 BUG_ON(tunnel == NULL);
1143
1144 PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
1145 "%s: closing all sessions...\n", tunnel->name);
1146
1147 write_lock_bh(&tunnel->hlist_lock);
1148 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
1149again:
1150 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
1151 session = hlist_entry(walk, struct l2tp_session, hlist);
1152
1153 PRINTK(session->debug, L2TP_MSG_CONTROL, KERN_INFO,
1154 "%s: closing session\n", session->name);
1155
1156 hlist_del_init(&session->hlist);
1157
1158 /* Since we should hold the sock lock while
1159 * doing any unbinding, we need to release the
1160 * lock we're holding before taking that lock.
1161 * Hold a reference to the sock so it doesn't
1162 * disappear as we're jumping between locks.
1163 */
1164 if (session->ref != NULL)
1165 (*session->ref)(session);
1166
1167 write_unlock_bh(&tunnel->hlist_lock);
1168
1169 if (tunnel->version != L2TP_HDR_VER_2) {
1170 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1171
1172 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1173 hlist_del_init_rcu(&session->global_hlist);
1174 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1175 synchronize_rcu();
1176 }
1177
1178 if (session->session_close != NULL)
1179 (*session->session_close)(session);
1180
1181 if (session->deref != NULL)
1182 (*session->deref)(session);
1183
1184 write_lock_bh(&tunnel->hlist_lock);
1185
1186 /* Now restart from the beginning of this hash
1187 * chain. We always remove a session from the
1188 * list so we are guaranteed to make forward
1189 * progress.
1190 */
1191 goto again;
1192 }
1193 }
1194 write_unlock_bh(&tunnel->hlist_lock);
1195}
1196EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall);
1197
1198/* Really kill the tunnel.
1199 * Come here only when all sessions have been cleared from the tunnel.
1200 */
1201void l2tp_tunnel_free(struct l2tp_tunnel *tunnel)
1202{
1203 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1204
1205 BUG_ON(atomic_read(&tunnel->ref_count) != 0);
1206 BUG_ON(tunnel->sock != NULL);
1207
1208 PRINTK(tunnel->debug, L2TP_MSG_CONTROL, KERN_INFO,
1209 "%s: free...\n", tunnel->name);
1210
1211 /* Remove from tunnel list */
1212 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1213 list_del_rcu(&tunnel->list);
1214 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1215 synchronize_rcu();
1216
1217 atomic_dec(&l2tp_tunnel_count);
1218 kfree(tunnel);
1219}
1220EXPORT_SYMBOL_GPL(l2tp_tunnel_free);
1221
1222/* Create a socket for the tunnel, if one isn't set up by
1223 * userspace. This is used for static tunnels where there is no
1224 * managing L2TP daemon.
1225 */
1226static int l2tp_tunnel_sock_create(u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct socket **sockp)
1227{
1228 int err = -EINVAL;
1229 struct sockaddr_in udp_addr;
1230 struct sockaddr_l2tpip ip_addr;
1231 struct socket *sock = NULL;
1232
1233 switch (cfg->encap) {
1234 case L2TP_ENCAPTYPE_UDP:
1235 err = sock_create(AF_INET, SOCK_DGRAM, 0, sockp);
1236 if (err < 0)
1237 goto out;
1238
1239 sock = *sockp;
1240
1241 memset(&udp_addr, 0, sizeof(udp_addr));
1242 udp_addr.sin_family = AF_INET;
1243 udp_addr.sin_addr = cfg->local_ip;
1244 udp_addr.sin_port = htons(cfg->local_udp_port);
1245 err = kernel_bind(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr));
1246 if (err < 0)
1247 goto out;
1248
1249 udp_addr.sin_family = AF_INET;
1250 udp_addr.sin_addr = cfg->peer_ip;
1251 udp_addr.sin_port = htons(cfg->peer_udp_port);
1252 err = kernel_connect(sock, (struct sockaddr *) &udp_addr, sizeof(udp_addr), 0);
1253 if (err < 0)
1254 goto out;
1255
1256 if (!cfg->use_udp_checksums)
1257 sock->sk->sk_no_check = UDP_CSUM_NOXMIT;
1258
1259 break;
1260
1261 case L2TP_ENCAPTYPE_IP:
1262 err = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_L2TP, sockp);
1263 if (err < 0)
1264 goto out;
1265
1266 sock = *sockp;
1267
1268 memset(&ip_addr, 0, sizeof(ip_addr));
1269 ip_addr.l2tp_family = AF_INET;
1270 ip_addr.l2tp_addr = cfg->local_ip;
1271 ip_addr.l2tp_conn_id = tunnel_id;
1272 err = kernel_bind(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr));
1273 if (err < 0)
1274 goto out;
1275
1276 ip_addr.l2tp_family = AF_INET;
1277 ip_addr.l2tp_addr = cfg->peer_ip;
1278 ip_addr.l2tp_conn_id = peer_tunnel_id;
1279 err = kernel_connect(sock, (struct sockaddr *) &ip_addr, sizeof(ip_addr), 0);
1280 if (err < 0)
1281 goto out;
1282
1283 break;
1284
1285 default:
1286 goto out;
1287 }
1288
1289out:
1290 if ((err < 0) && sock) {
1291 sock_release(sock);
1292 *sockp = NULL;
1293 }
1294
1295 return err;
1296}
1297
1298int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp)
1299{
1300 struct l2tp_tunnel *tunnel = NULL;
1301 int err;
1302 struct socket *sock = NULL;
1303 struct sock *sk = NULL;
1304 struct l2tp_net *pn;
1305 enum l2tp_encap_type encap = L2TP_ENCAPTYPE_UDP;
1306
1307 /* Get the tunnel socket from the fd, which was opened by
1308 * the userspace L2TP daemon. If not specified, create a
1309 * kernel socket.
1310 */
1311 if (fd < 0) {
1312 err = l2tp_tunnel_sock_create(tunnel_id, peer_tunnel_id, cfg, &sock);
1313 if (err < 0)
1314 goto err;
1315 } else {
1316 err = -EBADF;
1317 sock = sockfd_lookup(fd, &err);
1318 if (!sock) {
1319 printk(KERN_ERR "tunl %hu: sockfd_lookup(fd=%d) returned %d\n",
1320 tunnel_id, fd, err);
1321 goto err;
1322 }
1323 }
1324
1325 sk = sock->sk;
1326
1327 if (cfg != NULL)
1328 encap = cfg->encap;
1329
1330 /* Quick sanity checks */
1331 switch (encap) {
1332 case L2TP_ENCAPTYPE_UDP:
1333 err = -EPROTONOSUPPORT;
1334 if (sk->sk_protocol != IPPROTO_UDP) {
1335 printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1336 tunnel_id, fd, sk->sk_protocol, IPPROTO_UDP);
1337 goto err;
1338 }
1339 break;
1340 case L2TP_ENCAPTYPE_IP:
1341 err = -EPROTONOSUPPORT;
1342 if (sk->sk_protocol != IPPROTO_L2TP) {
1343 printk(KERN_ERR "tunl %hu: fd %d wrong protocol, got %d, expected %d\n",
1344 tunnel_id, fd, sk->sk_protocol, IPPROTO_L2TP);
1345 goto err;
1346 }
1347 break;
1348 }
1349
1350 /* Check if this socket has already been prepped */
1351 tunnel = (struct l2tp_tunnel *)sk->sk_user_data;
1352 if (tunnel != NULL) {
1353 /* This socket has already been prepped */
1354 err = -EBUSY;
1355 goto err;
1356 }
1357
1358 tunnel = kzalloc(sizeof(struct l2tp_tunnel), GFP_KERNEL);
1359 if (tunnel == NULL) {
1360 err = -ENOMEM;
1361 goto err;
1362 }
1363
1364 tunnel->version = version;
1365 tunnel->tunnel_id = tunnel_id;
1366 tunnel->peer_tunnel_id = peer_tunnel_id;
1367 tunnel->debug = L2TP_DEFAULT_DEBUG_FLAGS;
1368
1369 tunnel->magic = L2TP_TUNNEL_MAGIC;
1370 sprintf(&tunnel->name[0], "tunl %u", tunnel_id);
1371 rwlock_init(&tunnel->hlist_lock);
1372
1373 /* The net we belong to */
1374 tunnel->l2tp_net = net;
1375 pn = l2tp_pernet(net);
1376
1377 if (cfg != NULL)
1378 tunnel->debug = cfg->debug;
1379
1380 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1381 tunnel->encap = encap;
1382 if (encap == L2TP_ENCAPTYPE_UDP) {
1383 /* Mark socket as an encapsulation socket. See net/ipv4/udp.c */
1384 udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;
1385 udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv;
1386 }
1387
1388 sk->sk_user_data = tunnel;
1389
1390 /* Hook on the tunnel socket destructor so that we can cleanup
1391 * if the tunnel socket goes away.
1392 */
1393 tunnel->old_sk_destruct = sk->sk_destruct;
1394 sk->sk_destruct = &l2tp_tunnel_destruct;
1395 tunnel->sock = sk;
1396 sk->sk_allocation = GFP_ATOMIC;
1397
1398 /* Add tunnel to our list */
1399 INIT_LIST_HEAD(&tunnel->list);
1400 spin_lock_bh(&pn->l2tp_tunnel_list_lock);
1401 list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
1402 spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
1403 synchronize_rcu();
1404 atomic_inc(&l2tp_tunnel_count);
1405
1406 /* Bump the reference count. The tunnel context is deleted
1407 * only when this drops to zero.
1408 */
1409 l2tp_tunnel_inc_refcount(tunnel);
1410
1411 err = 0;
1412err:
1413 if (tunnelp)
1414 *tunnelp = tunnel;
1415
1416 /* If tunnel's socket was created by the kernel, it doesn't
1417 * have a file.
1418 */
1419 if (sock && sock->file)
1420 sockfd_put(sock);
1421
1422 return err;
1423}
1424EXPORT_SYMBOL_GPL(l2tp_tunnel_create);
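
A hypothetical caller, to illustrate the calling convention: a management path creating an unmanaged L2TPv3 tunnel over plain IP (fd < 0 requests a kernel socket, as handled above). The wrapper name and the bare-bones cfg are illustrative; a real caller would also fill in the local and peer addresses:

	static int l2tp_create_static_tunnel(struct net *net)
	{
		struct l2tp_tunnel *tunnel;
		struct l2tp_tunnel_cfg cfg = {
			.encap = L2TP_ENCAPTYPE_IP,
			.debug = 0,
			/* .local_ip / .peer_ip elided for brevity */
		};

		/* version 3, local tunnel id 1, peer tunnel id 2 */
		return l2tp_tunnel_create(net, -1, 3, 1, 2, &cfg, &tunnel);
	}
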
1425
1426/* This function is used by the netlink TUNNEL_DELETE command.
1427 */
1428int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)
1429{
1430 int err = 0;
1431 struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL;
1432
1433 /* Force the tunnel socket to close. This will eventually
1434 * cause the tunnel to be deleted via the normal socket close
1435 * mechanisms when userspace closes the tunnel socket.
1436 */
1437 if (sock != NULL) {
1438 err = inet_shutdown(sock, 2);
1439
1440 /* If the tunnel's socket was created by the kernel,
1441 * close the socket here since the socket was not
1442 * created by userspace.
1443 */
1444 if (sock->file == NULL)
1445 err = inet_release(sock);
1446 }
1447
1448 return err;
1449}
1450EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);
1451
1452/* Really kill the session.
1453 */
1454void l2tp_session_free(struct l2tp_session *session)
1455{
1456 struct l2tp_tunnel *tunnel;
1457
1458 BUG_ON(atomic_read(&session->ref_count) != 0);
1459
1460 tunnel = session->tunnel;
1461 if (tunnel != NULL) {
1462 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
1463
1464 /* Delete the session from the hash */
1465 write_lock_bh(&tunnel->hlist_lock);
1466 hlist_del_init(&session->hlist);
1467 write_unlock_bh(&tunnel->hlist_lock);
1468
1469 /* Unlink from the global hash if not L2TPv2 */
1470 if (tunnel->version != L2TP_HDR_VER_2) {
1471 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1472
1473 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1474 hlist_del_init_rcu(&session->global_hlist);
1475 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1476 synchronize_rcu();
1477 }
1478
1479 if (session->session_id != 0)
1480 atomic_dec(&l2tp_session_count);
1481
1482 sock_put(tunnel->sock);
1483
1484 /* This will delete the tunnel context if this
1485 * is the last session on the tunnel.
1486 */
1487 session->tunnel = NULL;
1488 l2tp_tunnel_dec_refcount(tunnel);
1489 }
1490
1491 kfree(session);
1492
1493 return;
1494}
1495EXPORT_SYMBOL_GPL(l2tp_session_free);
1496
1497/* This function is used by the netlink SESSION_DELETE command and by
1498 pseudowire modules.
1499 */
1500int l2tp_session_delete(struct l2tp_session *session)
1501{
1502 if (session->session_close != NULL)
1503 (*session->session_close)(session);
1504
1505 l2tp_session_dec_refcount(session);
1506
1507 return 0;
1508}
1509EXPORT_SYMBOL_GPL(l2tp_session_delete);
1510
1511
1512/* We come here whenever a session's send_seq, cookie_len or
1513 * l2specific_len parameters are set.
1514 */
1515void l2tp_session_set_header_len(struct l2tp_session *session, int version)
1516{
1517 if (version == L2TP_HDR_VER_2) {
1518 session->hdr_len = 6;
1519 if (session->send_seq)
1520 session->hdr_len += 4;
1521 } else {
1522 session->hdr_len = 4 + session->cookie_len + session->l2specific_len + session->offset;
1523 if (session->tunnel->encap == L2TP_ENCAPTYPE_UDP)
1524 session->hdr_len += 4;
1525 }
1526
1527}
1528EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
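
Worked examples of the computation above: an L2TPv2 session needs 6 bytes of header (flags, tunnel ID, session ID), or 10 with sequence numbers (adding Ns/Nr), matching L2TP_HDR_SIZE_NOSEQ and L2TP_HDR_SIZE_SEQ. An L2TPv3-over-UDP session with a 4-byte cookie, the 4-byte default sublayer and no offset pad comes to 4 (UDP-mode flags) + 4 (session ID) + 4 (cookie) + 4 (sublayer) = 16 bytes.
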
1529
1530struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
1531{
1532 struct l2tp_session *session;
1533
1534 session = kzalloc(sizeof(struct l2tp_session) + priv_size, GFP_KERNEL);
1535 if (session != NULL) {
1536 session->magic = L2TP_SESSION_MAGIC;
1537 session->tunnel = tunnel;
1538
1539 session->session_id = session_id;
1540 session->peer_session_id = peer_session_id;
1541 session->nr = 1;
1542
1543 sprintf(&session->name[0], "sess %u/%u",
1544 tunnel->tunnel_id, session->session_id);
1545
1546 skb_queue_head_init(&session->reorder_q);
1547
1548 INIT_HLIST_NODE(&session->hlist);
1549 INIT_HLIST_NODE(&session->global_hlist);
1550
1551 /* Inherit debug options from tunnel */
1552 session->debug = tunnel->debug;
1553
1554 if (cfg) {
1555 session->pwtype = cfg->pw_type;
1556 session->debug = cfg->debug;
1557 session->mtu = cfg->mtu;
1558 session->mru = cfg->mru;
1559 session->send_seq = cfg->send_seq;
1560 session->recv_seq = cfg->recv_seq;
1561 session->lns_mode = cfg->lns_mode;
1562 session->reorder_timeout = cfg->reorder_timeout;
1563 session->offset = cfg->offset;
1564 session->l2specific_type = cfg->l2specific_type;
1565 session->l2specific_len = cfg->l2specific_len;
1566 session->cookie_len = cfg->cookie_len;
1567 memcpy(&session->cookie[0], &cfg->cookie[0], cfg->cookie_len);
1568 session->peer_cookie_len = cfg->peer_cookie_len;
1569 memcpy(&session->peer_cookie[0], &cfg->peer_cookie[0], cfg->peer_cookie_len);
1570 }
1571
1572 if (tunnel->version == L2TP_HDR_VER_2)
1573 session->build_header = l2tp_build_l2tpv2_header;
1574 else
1575 session->build_header = l2tp_build_l2tpv3_header;
1576
1577 l2tp_session_set_header_len(session, tunnel->version);
1578
1579 /* Bump the reference count. The session context is deleted
1580 * only when this drops to zero.
1581 */
1582 l2tp_session_inc_refcount(session);
1583 l2tp_tunnel_inc_refcount(tunnel);
1584
1585 /* Ensure tunnel socket isn't deleted */
1586 sock_hold(tunnel->sock);
1587
1588 /* Add session to the tunnel's hash list */
1589 write_lock_bh(&tunnel->hlist_lock);
1590 hlist_add_head(&session->hlist,
1591 l2tp_session_id_hash(tunnel, session_id));
1592 write_unlock_bh(&tunnel->hlist_lock);
1593
1594 /* And to the global session list if L2TPv3 */
1595 if (tunnel->version != L2TP_HDR_VER_2) {
1596 struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net);
1597
1598 spin_lock_bh(&pn->l2tp_session_hlist_lock);
1599 hlist_add_head_rcu(&session->global_hlist,
1600 l2tp_session_id_hash_2(pn, session_id));
1601 spin_unlock_bh(&pn->l2tp_session_hlist_lock);
1602 synchronize_rcu();
1603 }
1604
1605		/* Ignore the management session (id 0) in the session count */
1606		if (session->session_id != 0)
1607 atomic_inc(&l2tp_session_count);
1608 }
1609
1610 return session;
1611}
1612EXPORT_SYMBOL_GPL(l2tp_session_create);
1613
1614/*****************************************************************************
1615 * Init and cleanup
1616 *****************************************************************************/
1617
1618static __net_init int l2tp_init_net(struct net *net)
1619{
1620 struct l2tp_net *pn;
1621 int err;
1622 int hash;
1623
1624 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
1625 if (!pn)
1626 return -ENOMEM;
1627
1628 INIT_LIST_HEAD(&pn->l2tp_tunnel_list);
1629 spin_lock_init(&pn->l2tp_tunnel_list_lock);
1630
1631 for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++)
1632 INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]);
1633
1634 spin_lock_init(&pn->l2tp_session_hlist_lock);
1635
1636 err = net_assign_generic(net, l2tp_net_id, pn);
1637 if (err)
1638 goto out;
1639
1640 return 0;
1641
1642out:
1643 kfree(pn);
1644 return err;
1645}
1646
1647static __net_exit void l2tp_exit_net(struct net *net)
1648{
1649 struct l2tp_net *pn;
1650
1651 pn = net_generic(net, l2tp_net_id);
1652 /*
1653	 * If someone has cached our net, then a further
1654	 * net_generic() call will return NULL.
1655 */
1656 net_assign_generic(net, l2tp_net_id, NULL);
1657 kfree(pn);
1658}
1659
1660static struct pernet_operations l2tp_net_ops = {
1661 .init = l2tp_init_net,
1662 .exit = l2tp_exit_net,
1663 .id = &l2tp_net_id,
1664 .size = sizeof(struct l2tp_net),
1665};
1666
1667static int __init l2tp_init(void)
1668{
1669 int rc = 0;
1670
1671 rc = register_pernet_device(&l2tp_net_ops);
1672 if (rc)
1673 goto out;
1674
1675 printk(KERN_INFO "L2TP core driver, %s\n", L2TP_DRV_VERSION);
1676
1677out:
1678 return rc;
1679}
1680
1681static void __exit l2tp_exit(void)
1682{
1683 unregister_pernet_device(&l2tp_net_ops);
1684}
1685
1686module_init(l2tp_init);
1687module_exit(l2tp_exit);
1688
1689MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1690MODULE_DESCRIPTION("L2TP core");
1691MODULE_LICENSE("GPL");
1692MODULE_VERSION(L2TP_DRV_VERSION);
1693
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
new file mode 100644
index 000000000000..f0f318edd3f1
--- /dev/null
+++ b/net/l2tp/l2tp_core.h
@@ -0,0 +1,304 @@
1/*
2 * L2TP internal definitions.
3 *
4 * Copyright (c) 2008,2009 Katalix Systems Ltd
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef _L2TP_CORE_H_
12#define _L2TP_CORE_H_
13
14/* Just some random numbers */
15#define L2TP_TUNNEL_MAGIC 0x42114DDA
16#define L2TP_SESSION_MAGIC 0x0C04EB7D
17
18/* Per tunnel, session hash table size */
19#define L2TP_HASH_BITS 4
20#define L2TP_HASH_SIZE (1 << L2TP_HASH_BITS)
21
22/* System-wide, session hash table size */
23#define L2TP_HASH_BITS_2 8
24#define L2TP_HASH_SIZE_2 (1 << L2TP_HASH_BITS_2)
25
26/* Debug message categories for the DEBUG socket option */
27enum {
28 L2TP_MSG_DEBUG = (1 << 0), /* verbose debug (if
29 * compiled in) */
30 L2TP_MSG_CONTROL = (1 << 1), /* userspace - kernel
31 * interface */
32 L2TP_MSG_SEQ = (1 << 2), /* sequence numbers */
33 L2TP_MSG_DATA = (1 << 3), /* data packets */
34};
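/* Example (sketch): the categories form a bitmask and may be OR-ed
 * together, e.g. to trace control traffic and sequence numbers:
 *
 *	session->debug = L2TP_MSG_CONTROL | L2TP_MSG_SEQ;
 */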
35
36struct sk_buff;
37
38struct l2tp_stats {
39 u64 tx_packets;
40 u64 tx_bytes;
41 u64 tx_errors;
42 u64 rx_packets;
43 u64 rx_bytes;
44 u64 rx_seq_discards;
45 u64 rx_oos_packets;
46 u64 rx_errors;
47 u64 rx_cookie_discards;
48};
49
50struct l2tp_tunnel;
51
52/* Describes a session. Contains the information needed to demultiplex
53 * incoming packets and to build and transmit outgoing ones.
54 */
55struct l2tp_session_cfg {
56 enum l2tp_pwtype pw_type;
57 unsigned data_seq:2; /* data sequencing level
58 * 0 => none, 1 => IP only,
59 * 2 => all
60 */
61 unsigned recv_seq:1; /* expect receive packets with
62 * sequence numbers? */
63 unsigned send_seq:1; /* send packets with sequence
64 * numbers? */
65 unsigned lns_mode:1; /* behave as LNS? LAC enables
66 * sequence numbers under
67 * control of LNS. */
68 int debug; /* bitmask of debug message
69 * categories */
70 u16 vlan_id; /* VLAN pseudowire only */
71 u16 offset; /* offset to payload */
72 u16 l2specific_len; /* Layer 2 specific length */
73 u16 l2specific_type; /* Layer 2 specific type */
74 u8 cookie[8]; /* optional cookie */
75 int cookie_len; /* 0, 4 or 8 bytes */
76 u8 peer_cookie[8]; /* peer's cookie */
77 int peer_cookie_len; /* 0, 4 or 8 bytes */
78 int reorder_timeout; /* configured reorder timeout
79 * (in jiffies) */
80 int mtu;
81 int mru;
82 char *ifname;
83};
84
85struct l2tp_session {
86 int magic; /* should be
87 * L2TP_SESSION_MAGIC */
88
89 struct l2tp_tunnel *tunnel; /* back pointer to tunnel
90 * context */
91 u32 session_id;
92 u32 peer_session_id;
93 u8 cookie[8];
94 int cookie_len;
95 u8 peer_cookie[8];
96 int peer_cookie_len;
97 u16 offset; /* offset from end of L2TP header
98	 * to beginning of data */
99 u16 l2specific_len;
100 u16 l2specific_type;
101 u16 hdr_len;
102 u32 nr; /* session NR state (receive) */
103 u32 ns; /* session NR state (send) */
104 struct sk_buff_head reorder_q; /* receive reorder queue */
105 struct hlist_node hlist; /* Hash list node */
106 atomic_t ref_count;
107
108 char name[32]; /* for logging */
109 char ifname[IFNAMSIZ];
110 unsigned data_seq:2; /* data sequencing level
111 * 0 => none, 1 => IP only,
112 * 2 => all
113 */
114 unsigned recv_seq:1; /* expect receive packets with
115 * sequence numbers? */
116 unsigned send_seq:1; /* send packets with sequence
117 * numbers? */
118 unsigned lns_mode:1; /* behave as LNS? LAC enables
119 * sequence numbers under
120 * control of LNS. */
121 int debug; /* bitmask of debug message
122 * categories */
123 int reorder_timeout; /* configured reorder timeout
124 * (in jiffies) */
125 int mtu;
126 int mru;
127 enum l2tp_pwtype pwtype;
128 struct l2tp_stats stats;
129 struct hlist_node global_hlist; /* Global hash list node */
130
131 int (*build_header)(struct l2tp_session *session, void *buf);
132 void (*recv_skb)(struct l2tp_session *session, struct sk_buff *skb, int data_len);
133 void (*session_close)(struct l2tp_session *session);
134 void (*ref)(struct l2tp_session *session);
135 void (*deref)(struct l2tp_session *session);
136#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
137 void (*show)(struct seq_file *m, void *priv);
138#endif
139 uint8_t priv[0]; /* private data */
140};
141
142/* Describes a tunnel. Contains the info needed to track all the
143 * associated sessions so that incoming packets can be demultiplexed.
144 */
145struct l2tp_tunnel_cfg {
146 int debug; /* bitmask of debug message
147 * categories */
148 enum l2tp_encap_type encap;
149
150 /* Used only for kernel-created sockets */
151 struct in_addr local_ip;
152 struct in_addr peer_ip;
153 u16 local_udp_port;
154 u16 peer_udp_port;
155 unsigned int use_udp_checksums:1;
156};
157
158struct l2tp_tunnel {
159 int magic; /* Should be L2TP_TUNNEL_MAGIC */
160 rwlock_t hlist_lock; /* protect session_hlist */
161 struct hlist_head session_hlist[L2TP_HASH_SIZE];
162 /* hashed list of sessions,
163 * hashed by id */
164 u32 tunnel_id;
165 u32 peer_tunnel_id;
166 int version; /* 2=>L2TPv2, 3=>L2TPv3 */
167
168 char name[20]; /* for logging */
169 int debug; /* bitmask of debug message
170 * categories */
171 enum l2tp_encap_type encap;
172 struct l2tp_stats stats;
173
174 struct list_head list; /* Keep a list of all tunnels */
175 struct net *l2tp_net; /* the net we belong to */
176
177 atomic_t ref_count;
178#ifdef CONFIG_DEBUG_FS
179 void (*show)(struct seq_file *m, void *arg);
180#endif
181 int (*recv_payload_hook)(struct sk_buff *skb);
182 void (*old_sk_destruct)(struct sock *);
183 struct sock *sock; /* Parent socket */
184 int fd;
185
186 uint8_t priv[0]; /* private data */
187};
188
189struct l2tp_nl_cmd_ops {
190 int (*session_create)(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
191 int (*session_delete)(struct l2tp_session *session);
192};
193
194static inline void *l2tp_tunnel_priv(struct l2tp_tunnel *tunnel)
195{
196 return &tunnel->priv[0];
197}
198
199static inline void *l2tp_session_priv(struct l2tp_session *session)
200{
201 return &session->priv[0];
202}
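/* Example (sketch): a pseudowire driver reserves per-session private
 * space via the priv_size argument of l2tp_session_create() and
 * retrieves it with l2tp_session_priv().  "struct my_pw_sess" is a
 * hypothetical type used only for illustration:
 *
 *	struct my_pw_sess { struct net_device *dev; };
 *
 *	session = l2tp_session_create(sizeof(struct my_pw_sess), tunnel,
 *				      session_id, peer_session_id, cfg);
 *	spriv = l2tp_session_priv(session);
 */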
203
204static inline struct l2tp_tunnel *l2tp_sock_to_tunnel(struct sock *sk)
205{
206 struct l2tp_tunnel *tunnel;
207
208 if (sk == NULL)
209 return NULL;
210
211 sock_hold(sk);
212 tunnel = (struct l2tp_tunnel *)(sk->sk_user_data);
213 if (tunnel == NULL) {
214 sock_put(sk);
215 goto out;
216 }
217
218 BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC);
219
220out:
221 return tunnel;
222}
223
224extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id);
225extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth);
226extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname);
227extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);
228extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);
229
230extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp);
231extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);
232extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg);
233extern int l2tp_session_delete(struct l2tp_session *session);
234extern void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
235extern void l2tp_session_free(struct l2tp_session *session);
236extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb));
237extern int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb, int (*payload_hook)(struct sk_buff *skb));
238extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
239
240extern int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb, size_t data_len);
241extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len);
242extern void l2tp_tunnel_destruct(struct sock *sk);
243extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);
244extern void l2tp_session_set_header_len(struct l2tp_session *session, int version);
245
246extern int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops);
247extern void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type);
248
249/* Tunnel reference counts. Incremented per session that is added to
250 * the tunnel.
251 */
252static inline void l2tp_tunnel_inc_refcount_1(struct l2tp_tunnel *tunnel)
253{
254 atomic_inc(&tunnel->ref_count);
255}
256
257static inline void l2tp_tunnel_dec_refcount_1(struct l2tp_tunnel *tunnel)
258{
259 if (atomic_dec_and_test(&tunnel->ref_count))
260 l2tp_tunnel_free(tunnel);
261}
262#ifdef L2TP_REFCNT_DEBUG
263#define l2tp_tunnel_inc_refcount(_t) do { \
264 printk(KERN_DEBUG "l2tp_tunnel_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
265 l2tp_tunnel_inc_refcount_1(_t); \
266 } while (0)
267#define l2tp_tunnel_dec_refcount(_t) do { \
268 printk(KERN_DEBUG "l2tp_tunnel_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_t)->name, atomic_read(&_t->ref_count)); \
269 l2tp_tunnel_dec_refcount_1(_t); \
270 } while (0)
271#else
272#define l2tp_tunnel_inc_refcount(t) l2tp_tunnel_inc_refcount_1(t)
273#define l2tp_tunnel_dec_refcount(t) l2tp_tunnel_dec_refcount_1(t)
274#endif
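/* Example (sketch): the expected pairing, mirroring what
 * l2tp_session_create() and l2tp_session_free() do:
 *
 *	l2tp_tunnel_inc_refcount(tunnel);  session now references tunnel
 *	...
 *	l2tp_tunnel_dec_refcount(tunnel);  may invoke l2tp_tunnel_free()
 */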
275
276/* Session reference counts. Incremented when code obtains a reference
277 * to a session.
278 */
279static inline void l2tp_session_inc_refcount_1(struct l2tp_session *session)
280{
281 atomic_inc(&session->ref_count);
282}
283
284static inline void l2tp_session_dec_refcount_1(struct l2tp_session *session)
285{
286 if (atomic_dec_and_test(&session->ref_count))
287 l2tp_session_free(session);
288}
289
290#ifdef L2TP_REFCNT_DEBUG
291#define l2tp_session_inc_refcount(_s) do { \
292 printk(KERN_DEBUG "l2tp_session_inc_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
293 l2tp_session_inc_refcount_1(_s); \
294 } while (0)
295#define l2tp_session_dec_refcount(_s) do { \
296 printk(KERN_DEBUG "l2tp_session_dec_refcount: %s:%d %s: cnt=%d\n", __func__, __LINE__, (_s)->name, atomic_read(&_s->ref_count)); \
297 l2tp_session_dec_refcount_1(_s); \
298 } while (0)
299#else
300#define l2tp_session_inc_refcount(s) l2tp_session_inc_refcount_1(s)
301#define l2tp_session_dec_refcount(s) l2tp_session_dec_refcount_1(s)
302#endif
303
304#endif /* _L2TP_CORE_H_ */
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
new file mode 100644
index 000000000000..104ec3b283d4
--- /dev/null
+++ b/net/l2tp/l2tp_debugfs.c
@@ -0,0 +1,341 @@
1/*
2 * L2TP subsystem debugfs
3 *
4 * Copyright (c) 2010 Katalix Systems Ltd
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/skbuff.h>
14#include <linux/socket.h>
15#include <linux/hash.h>
16#include <linux/l2tp.h>
17#include <linux/in.h>
18#include <linux/etherdevice.h>
19#include <linux/spinlock.h>
20#include <linux/debugfs.h>
21#include <net/sock.h>
22#include <net/ip.h>
23#include <net/icmp.h>
24#include <net/udp.h>
25#include <net/inet_common.h>
26#include <net/inet_hashtables.h>
27#include <net/tcp_states.h>
28#include <net/protocol.h>
29#include <net/xfrm.h>
30#include <net/net_namespace.h>
31#include <net/netns/generic.h>
32
33#include "l2tp_core.h"
34
35static struct dentry *rootdir;
36static struct dentry *tunnels;
37
38struct l2tp_dfs_seq_data {
39 struct net *net;
40 int tunnel_idx; /* current tunnel */
41 int session_idx; /* index of session within current tunnel */
42 struct l2tp_tunnel *tunnel;
43 struct l2tp_session *session; /* NULL means get next tunnel */
44};
45
46static void l2tp_dfs_next_tunnel(struct l2tp_dfs_seq_data *pd)
47{
48 pd->tunnel = l2tp_tunnel_find_nth(pd->net, pd->tunnel_idx);
49 pd->tunnel_idx++;
50}
51
52static void l2tp_dfs_next_session(struct l2tp_dfs_seq_data *pd)
53{
54 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
55 pd->session_idx++;
56
57 if (pd->session == NULL) {
58 pd->session_idx = 0;
59 l2tp_dfs_next_tunnel(pd);
60 }
61
62}
63
64static void *l2tp_dfs_seq_start(struct seq_file *m, loff_t *offs)
65{
66 struct l2tp_dfs_seq_data *pd = SEQ_START_TOKEN;
67 loff_t pos = *offs;
68
69 if (!pos)
70 goto out;
71
72 BUG_ON(m->private == NULL);
73 pd = m->private;
74
75 if (pd->tunnel == NULL)
76 l2tp_dfs_next_tunnel(pd);
77 else
78 l2tp_dfs_next_session(pd);
79
80	/* NULL tunnel and session indicate end of list */
81 if ((pd->tunnel == NULL) && (pd->session == NULL))
82 pd = NULL;
83
84out:
85 return pd;
86}
87
88
89static void *l2tp_dfs_seq_next(struct seq_file *m, void *v, loff_t *pos)
90{
91 (*pos)++;
92 return NULL;
93}
94
95static void l2tp_dfs_seq_stop(struct seq_file *p, void *v)
96{
97 /* nothing to do */
98}
99
100static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
101{
102 struct l2tp_tunnel *tunnel = v;
103 int session_count = 0;
104 int hash;
105 struct hlist_node *walk;
106 struct hlist_node *tmp;
107
108 read_lock_bh(&tunnel->hlist_lock);
109 for (hash = 0; hash < L2TP_HASH_SIZE; hash++) {
110 hlist_for_each_safe(walk, tmp, &tunnel->session_hlist[hash]) {
111 struct l2tp_session *session;
112
113 session = hlist_entry(walk, struct l2tp_session, hlist);
114 if (session->session_id == 0)
115 continue;
116
117 session_count++;
118 }
119 }
120 read_unlock_bh(&tunnel->hlist_lock);
121
122 seq_printf(m, "\nTUNNEL %u peer %u", tunnel->tunnel_id, tunnel->peer_tunnel_id);
123 if (tunnel->sock) {
124 struct inet_sock *inet = inet_sk(tunnel->sock);
125 seq_printf(m, " from %pI4 to %pI4\n",
126 &inet->inet_saddr, &inet->inet_daddr);
127 if (tunnel->encap == L2TP_ENCAPTYPE_UDP)
128 seq_printf(m, " source port %hu, dest port %hu\n",
129 ntohs(inet->inet_sport), ntohs(inet->inet_dport));
130 }
131 seq_printf(m, " L2TPv%d, %s\n", tunnel->version,
132 tunnel->encap == L2TP_ENCAPTYPE_UDP ? "UDP" :
133 tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" :
134 "");
135 seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count,
136 tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
137 atomic_read(&tunnel->ref_count));
138
139	seq_printf(m, " %08x tx %llu/%llu/%llu rx %llu/%llu/%llu\n",
140 tunnel->debug,
141 (unsigned long long)tunnel->stats.tx_packets,
142 (unsigned long long)tunnel->stats.tx_bytes,
143 (unsigned long long)tunnel->stats.tx_errors,
144 (unsigned long long)tunnel->stats.rx_packets,
145 (unsigned long long)tunnel->stats.rx_bytes,
146 (unsigned long long)tunnel->stats.rx_errors);
147
148 if (tunnel->show != NULL)
149 tunnel->show(m, tunnel);
150}
151
152static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)
153{
154 struct l2tp_session *session = v;
155
156 seq_printf(m, " SESSION %u, peer %u, %s\n", session->session_id,
157 session->peer_session_id,
158 session->pwtype == L2TP_PWTYPE_ETH ? "ETH" :
159 session->pwtype == L2TP_PWTYPE_PPP ? "PPP" :
160 "");
161 if (session->send_seq || session->recv_seq)
162 seq_printf(m, " nr %hu, ns %hu\n", session->nr, session->ns);
163 seq_printf(m, " refcnt %d\n", atomic_read(&session->ref_count));
164 seq_printf(m, " config %d/%d/%c/%c/%s/%s %08x %u\n",
165 session->mtu, session->mru,
166 session->recv_seq ? 'R' : '-',
167 session->send_seq ? 'S' : '-',
168 session->data_seq == 1 ? "IPSEQ" :
169 session->data_seq == 2 ? "DATASEQ" : "-",
170 session->lns_mode ? "LNS" : "LAC",
171 session->debug,
172 jiffies_to_msecs(session->reorder_timeout));
173 seq_printf(m, " offset %hu l2specific %hu/%hu\n",
174 session->offset, session->l2specific_type, session->l2specific_len);
175 if (session->cookie_len) {
176 seq_printf(m, " cookie %02x%02x%02x%02x",
177 session->cookie[0], session->cookie[1],
178 session->cookie[2], session->cookie[3]);
179 if (session->cookie_len == 8)
180 seq_printf(m, "%02x%02x%02x%02x",
181 session->cookie[4], session->cookie[5],
182 session->cookie[6], session->cookie[7]);
183 seq_printf(m, "\n");
184 }
185 if (session->peer_cookie_len) {
186 seq_printf(m, " peer cookie %02x%02x%02x%02x",
187 session->peer_cookie[0], session->peer_cookie[1],
188 session->peer_cookie[2], session->peer_cookie[3]);
189 if (session->peer_cookie_len == 8)
190 seq_printf(m, "%02x%02x%02x%02x",
191 session->peer_cookie[4], session->peer_cookie[5],
192 session->peer_cookie[6], session->peer_cookie[7]);
193 seq_printf(m, "\n");
194 }
195
196 seq_printf(m, " %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n",
197 session->nr, session->ns,
198 (unsigned long long)session->stats.tx_packets,
199 (unsigned long long)session->stats.tx_bytes,
200 (unsigned long long)session->stats.tx_errors,
201 (unsigned long long)session->stats.rx_packets,
202 (unsigned long long)session->stats.rx_bytes,
203 (unsigned long long)session->stats.rx_errors);
204
205 if (session->show != NULL)
206 session->show(m, session);
207}
208
209static int l2tp_dfs_seq_show(struct seq_file *m, void *v)
210{
211 struct l2tp_dfs_seq_data *pd = v;
212
213 /* display header on line 1 */
214 if (v == SEQ_START_TOKEN) {
215 seq_puts(m, "TUNNEL ID, peer ID from IP to IP\n");
216 seq_puts(m, " L2TPv2/L2TPv3, UDP/IP\n");
217 seq_puts(m, " sessions session-count, refcnt refcnt/sk->refcnt\n");
218 seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
219 seq_puts(m, " SESSION ID, peer ID, PWTYPE\n");
220 seq_puts(m, " refcnt cnt\n");
221 seq_puts(m, " offset OFFSET l2specific TYPE/LEN\n");
222 seq_puts(m, " [ cookie ]\n");
223 seq_puts(m, " [ peer cookie ]\n");
224 seq_puts(m, " config mtu/mru/rcvseq/sendseq/dataseq/lns debug reorderto\n");
225 seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
226 goto out;
227 }
228
229 /* Show the tunnel or session context */
230 if (pd->session == NULL)
231 l2tp_dfs_seq_tunnel_show(m, pd->tunnel);
232 else
233 l2tp_dfs_seq_session_show(m, pd->session);
234
235out:
236 return 0;
237}
238
239static const struct seq_operations l2tp_dfs_seq_ops = {
240 .start = l2tp_dfs_seq_start,
241 .next = l2tp_dfs_seq_next,
242 .stop = l2tp_dfs_seq_stop,
243 .show = l2tp_dfs_seq_show,
244};
245
246static int l2tp_dfs_seq_open(struct inode *inode, struct file *file)
247{
248 struct l2tp_dfs_seq_data *pd;
249 struct seq_file *seq;
250 int rc = -ENOMEM;
251
252	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
253 if (pd == NULL)
254 goto out;
255
256 /* Derive the network namespace from the pid opening the
257 * file.
258 */
259 pd->net = get_net_ns_by_pid(current->pid);
260 if (IS_ERR(pd->net)) {
261	rc = PTR_ERR(pd->net);
262 goto err_free_pd;
263 }
264
265 rc = seq_open(file, &l2tp_dfs_seq_ops);
266 if (rc)
267 goto err_free_net;
268
269 seq = file->private_data;
270 seq->private = pd;
271
272out:
273 return rc;
274
275err_free_net:
276 put_net(pd->net);
277err_free_pd:
278 kfree(pd);
279 goto out;
280}
281
282static int l2tp_dfs_seq_release(struct inode *inode, struct file *file)
283{
284 struct l2tp_dfs_seq_data *pd;
285 struct seq_file *seq;
286
287 seq = file->private_data;
288 pd = seq->private;
289 if (pd->net)
290 put_net(pd->net);
291 kfree(pd);
292 seq_release(inode, file);
293
294 return 0;
295}
296
297static const struct file_operations l2tp_dfs_fops = {
298 .owner = THIS_MODULE,
299 .open = l2tp_dfs_seq_open,
300 .read = seq_read,
301 .llseek = seq_lseek,
302 .release = l2tp_dfs_seq_release,
303};
304
305static int __init l2tp_debugfs_init(void)
306{
307 int rc = 0;
308
309 rootdir = debugfs_create_dir("l2tp", NULL);
310 if (IS_ERR(rootdir)) {
311 rc = PTR_ERR(rootdir);
312 rootdir = NULL;
313 goto out;
314 }
315
316 tunnels = debugfs_create_file("tunnels", 0600, rootdir, NULL, &l2tp_dfs_fops);
317 if (tunnels == NULL)
318 rc = -EIO;
319
320 printk(KERN_INFO "L2TP debugfs support\n");
321
322out:
323 if (rc)
324 printk(KERN_WARNING "l2tp debugfs: unable to init\n");
325
326 return rc;
327}
328
329static void __exit l2tp_debugfs_exit(void)
330{
331 debugfs_remove(tunnels);
332 debugfs_remove(rootdir);
333}
334
335module_init(l2tp_debugfs_init);
336module_exit(l2tp_debugfs_exit);
337
338MODULE_LICENSE("GPL");
339MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
340MODULE_DESCRIPTION("L2TP debugfs driver");
341MODULE_VERSION("1.0");
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
new file mode 100644
index 000000000000..ca1164afeb74
--- /dev/null
+++ b/net/l2tp/l2tp_eth.c
@@ -0,0 +1,361 @@
1/*
2 * L2TPv3 ethernet pseudowire driver
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/module.h>
13#include <linux/skbuff.h>
14#include <linux/socket.h>
15#include <linux/hash.h>
16#include <linux/l2tp.h>
17#include <linux/in.h>
18#include <linux/etherdevice.h>
19#include <linux/spinlock.h>
20#include <net/sock.h>
21#include <net/ip.h>
22#include <net/icmp.h>
23#include <net/udp.h>
24#include <net/inet_common.h>
25#include <net/inet_hashtables.h>
26#include <net/tcp_states.h>
27#include <net/protocol.h>
28#include <net/xfrm.h>
29#include <net/net_namespace.h>
30#include <net/netns/generic.h>
31
32#include "l2tp_core.h"
33
34/* Default device name. May be overridden by name specified by user */
35#define L2TP_ETH_DEV_NAME "l2tpeth%d"
36
37/* via netdev_priv() */
38struct l2tp_eth {
39 struct net_device *dev;
40 struct sock *tunnel_sock;
41 struct l2tp_session *session;
42 struct list_head list;
43};
44
45/* via l2tp_session_priv() */
46struct l2tp_eth_sess {
47 struct net_device *dev;
48};
49
50/* per-net private data for this module */
51static unsigned int l2tp_eth_net_id;
52struct l2tp_eth_net {
53 struct list_head l2tp_eth_dev_list;
54 spinlock_t l2tp_eth_lock;
55};
56
57static inline struct l2tp_eth_net *l2tp_eth_pernet(struct net *net)
58{
59 return net_generic(net, l2tp_eth_net_id);
60}
61
62static int l2tp_eth_dev_init(struct net_device *dev)
63{
64 struct l2tp_eth *priv = netdev_priv(dev);
65
66 priv->dev = dev;
67 random_ether_addr(dev->dev_addr);
68 memset(&dev->broadcast[0], 0xff, 6);
69
70 return 0;
71}
72
73static void l2tp_eth_dev_uninit(struct net_device *dev)
74{
75 struct l2tp_eth *priv = netdev_priv(dev);
76 struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev));
77
78 spin_lock(&pn->l2tp_eth_lock);
79 list_del_init(&priv->list);
80 spin_unlock(&pn->l2tp_eth_lock);
81 dev_put(dev);
82}
83
84static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev)
85{
86 struct l2tp_eth *priv = netdev_priv(dev);
87 struct l2tp_session *session = priv->session;
88
89 l2tp_xmit_skb(session, skb, session->hdr_len);
90
91 dev->stats.tx_bytes += skb->len;
92 dev->stats.tx_packets++;
93
94 return 0;
95}
96
97static struct net_device_ops l2tp_eth_netdev_ops = {
98 .ndo_init = l2tp_eth_dev_init,
99 .ndo_uninit = l2tp_eth_dev_uninit,
100 .ndo_start_xmit = l2tp_eth_dev_xmit,
101};
102
103static void l2tp_eth_dev_setup(struct net_device *dev)
104{
105 ether_setup(dev);
106
107 dev->netdev_ops = &l2tp_eth_netdev_ops;
108 dev->destructor = free_netdev;
109}
110
111static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
112{
113 struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
114 struct net_device *dev = spriv->dev;
115
116 if (session->debug & L2TP_MSG_DATA) {
117 unsigned int length;
118 int offset;
119 u8 *ptr = skb->data;
120
121 length = min(32u, skb->len);
122 if (!pskb_may_pull(skb, length))
123 goto error;
124
125 printk(KERN_DEBUG "%s: eth recv: ", session->name);
126
127 offset = 0;
128 do {
129 printk(" %02X", ptr[offset]);
130 } while (++offset < length);
131
132 printk("\n");
133 }
134
135 if (data_len < ETH_HLEN)
136 goto error;
137
138 secpath_reset(skb);
139
140 /* checksums verified by L2TP */
141 skb->ip_summed = CHECKSUM_NONE;
142
143 skb_dst_drop(skb);
144 nf_reset(skb);
145
146 if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) {
147 dev->last_rx = jiffies;
148 dev->stats.rx_packets++;
149 dev->stats.rx_bytes += data_len;
150 } else
151 dev->stats.rx_errors++;
152
153 return;
154
155error:
156 dev->stats.rx_errors++;
157 kfree_skb(skb);
158}
159
160static void l2tp_eth_delete(struct l2tp_session *session)
161{
162 struct l2tp_eth_sess *spriv;
163 struct net_device *dev;
164
165 if (session) {
166 spriv = l2tp_session_priv(session);
167 dev = spriv->dev;
168 if (dev) {
169 unregister_netdev(dev);
170 spriv->dev = NULL;
171 }
172 }
173}
174
175#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
176static void l2tp_eth_show(struct seq_file *m, void *arg)
177{
178 struct l2tp_session *session = arg;
179 struct l2tp_eth_sess *spriv = l2tp_session_priv(session);
180 struct net_device *dev = spriv->dev;
181
182 seq_printf(m, " interface %s\n", dev->name);
183}
184#endif
185
186static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
187{
188 struct net_device *dev;
189 char name[IFNAMSIZ];
190 struct l2tp_tunnel *tunnel;
191 struct l2tp_session *session;
192 struct l2tp_eth *priv;
193 struct l2tp_eth_sess *spriv;
194 int rc;
195 struct l2tp_eth_net *pn;
196
197 tunnel = l2tp_tunnel_find(net, tunnel_id);
198 if (!tunnel) {
199 rc = -ENODEV;
200 goto out;
201 }
202
203 session = l2tp_session_find(net, tunnel, session_id);
204 if (session) {
205 rc = -EEXIST;
206 goto out;
207 }
208
209 if (cfg->ifname) {
210 dev = dev_get_by_name(net, cfg->ifname);
211 if (dev) {
212 dev_put(dev);
213 rc = -EEXIST;
214 goto out;
215 }
216 strlcpy(name, cfg->ifname, IFNAMSIZ);
217 } else
218 strcpy(name, L2TP_ETH_DEV_NAME);
219
220 session = l2tp_session_create(sizeof(*spriv), tunnel, session_id,
221 peer_session_id, cfg);
222 if (!session) {
223 rc = -ENOMEM;
224 goto out;
225 }
226
227 dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup);
228 if (!dev) {
229 rc = -ENOMEM;
230 goto out_del_session;
231 }
232
233 dev_net_set(dev, net);
234 if (session->mtu == 0)
235 session->mtu = dev->mtu - session->hdr_len;
236 dev->mtu = session->mtu;
237 dev->needed_headroom += session->hdr_len;
238
239 priv = netdev_priv(dev);
240 priv->dev = dev;
241 priv->session = session;
242 INIT_LIST_HEAD(&priv->list);
243
244 priv->tunnel_sock = tunnel->sock;
245 session->recv_skb = l2tp_eth_dev_recv;
246 session->session_close = l2tp_eth_delete;
247#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
248 session->show = l2tp_eth_show;
249#endif
250
251 spriv = l2tp_session_priv(session);
252 spriv->dev = dev;
253
254 rc = register_netdev(dev);
255 if (rc < 0)
256 goto out_del_dev;
257
258 /* Must be done after register_netdev() */
259 strlcpy(session->ifname, dev->name, IFNAMSIZ);
260
261 dev_hold(dev);
262 pn = l2tp_eth_pernet(dev_net(dev));
263 spin_lock(&pn->l2tp_eth_lock);
264 list_add(&priv->list, &pn->l2tp_eth_dev_list);
265 spin_unlock(&pn->l2tp_eth_lock);
266
267 return 0;
268
269out_del_dev:
270 free_netdev(dev);
271out_del_session:
272 l2tp_session_delete(session);
273out:
274 return rc;
275}
276
277static __net_init int l2tp_eth_init_net(struct net *net)
278{
279 struct l2tp_eth_net *pn;
280 int err;
281
282 pn = kzalloc(sizeof(*pn), GFP_KERNEL);
283 if (!pn)
284 return -ENOMEM;
285
286 INIT_LIST_HEAD(&pn->l2tp_eth_dev_list);
287 spin_lock_init(&pn->l2tp_eth_lock);
288
289 err = net_assign_generic(net, l2tp_eth_net_id, pn);
290 if (err)
291 goto out;
292
293 return 0;
294
295out:
296 kfree(pn);
297 return err;
298}
299
300static __net_exit void l2tp_eth_exit_net(struct net *net)
301{
302 struct l2tp_eth_net *pn;
303
304 pn = net_generic(net, l2tp_eth_net_id);
305 /*
306	 * If someone has cached our net, then a further
307	 * net_generic() call will return NULL.
308 */
309 net_assign_generic(net, l2tp_eth_net_id, NULL);
310 kfree(pn);
311}
312
313static __net_initdata struct pernet_operations l2tp_eth_net_ops = {
314 .init = l2tp_eth_init_net,
315 .exit = l2tp_eth_exit_net,
316 .id = &l2tp_eth_net_id,
317 .size = sizeof(struct l2tp_eth_net),
318};
319
320
321static const struct l2tp_nl_cmd_ops l2tp_eth_nl_cmd_ops = {
322 .session_create = l2tp_eth_create,
323 .session_delete = l2tp_session_delete,
324};
325
326
327static int __init l2tp_eth_init(void)
328{
329 int err = 0;
330
331 err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops);
332 if (err)
333 goto out;
334
335 err = register_pernet_device(&l2tp_eth_net_ops);
336 if (err)
337 goto out_unreg;
338
339 printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n");
340
341 return 0;
342
343out_unreg:
344 l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
345out:
346 return err;
347}
348
349static void __exit l2tp_eth_exit(void)
350{
351 unregister_pernet_device(&l2tp_eth_net_ops);
352 l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH);
353}
354
355module_init(l2tp_eth_init);
356module_exit(l2tp_eth_exit);
357
358MODULE_LICENSE("GPL");
359MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
360MODULE_DESCRIPTION("L2TP ethernet pseudowire driver");
361MODULE_VERSION("1.0");
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
new file mode 100644
index 000000000000..0852512d392c
--- /dev/null
+++ b/net/l2tp/l2tp_ip.c
@@ -0,0 +1,679 @@
1/*
2 * L2TPv3 IP encapsulation support
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/icmp.h>
13#include <linux/module.h>
14#include <linux/skbuff.h>
15#include <linux/random.h>
16#include <linux/socket.h>
17#include <linux/l2tp.h>
18#include <linux/in.h>
19#include <net/sock.h>
20#include <net/ip.h>
21#include <net/icmp.h>
22#include <net/udp.h>
23#include <net/inet_common.h>
24#include <net/inet_hashtables.h>
25#include <net/tcp_states.h>
26#include <net/protocol.h>
27#include <net/xfrm.h>
28
29#include "l2tp_core.h"
30
31struct l2tp_ip_sock {
32 /* inet_sock has to be the first member of l2tp_ip_sock */
33 struct inet_sock inet;
34
35 __u32 conn_id;
36 __u32 peer_conn_id;
37
38 __u64 tx_packets;
39 __u64 tx_bytes;
40 __u64 tx_errors;
41 __u64 rx_packets;
42 __u64 rx_bytes;
43 __u64 rx_errors;
44};
45
46static DEFINE_RWLOCK(l2tp_ip_lock);
47static struct hlist_head l2tp_ip_table;
48static struct hlist_head l2tp_ip_bind_table;
49
50static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
51{
52 return (struct l2tp_ip_sock *)sk;
53}
54
55static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
56{
57 struct hlist_node *node;
58 struct sock *sk;
59
60 sk_for_each_bound(sk, node, &l2tp_ip_bind_table) {
61 struct inet_sock *inet = inet_sk(sk);
62 struct l2tp_ip_sock *l2tp = l2tp_ip_sk(sk);
63
64 if (l2tp == NULL)
65 continue;
66
67 if ((l2tp->conn_id == tunnel_id) &&
68#ifdef CONFIG_NET_NS
69 (sk->sk_net == net) &&
70#endif
71 !(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
72 !(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
73 goto found;
74 }
75
76 sk = NULL;
77found:
78 return sk;
79}
80
81static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
82{
83 struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
84 if (sk)
85 sock_hold(sk);
86
87 return sk;
88}
89
90/* When processing received frames, there are two cases to
91 * consider. Data frames consist of a non-zero session-id and an
92 * optional cookie. Control frames consist of a regular L2TP header
93 * preceded by 32-bits of zeros.
94 *
95 * L2TPv3 Session Header Over IP
96 *
97 * 0 1 2 3
98 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
99 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
100 * | Session ID |
101 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
102 * | Cookie (optional, maximum 64 bits)...
103 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
104 * |
105 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
106 *
107 * L2TPv3 Control Message Header Over IP
108 *
109 * 0 1 2 3
110 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
111 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
112 * | (32 bits of zeros) |
113 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
114 * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length |
115 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
116 * | Control Connection ID |
117 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
118 * | Ns | Nr |
119 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
120 *
121 * All control frames are passed to userspace.
122 */
123static int l2tp_ip_recv(struct sk_buff *skb)
124{
125 struct sock *sk;
126 u32 session_id;
127 u32 tunnel_id;
128 unsigned char *ptr, *optr;
129 struct l2tp_session *session;
130 struct l2tp_tunnel *tunnel = NULL;
131 int length;
132 int offset;
133
134 /* Point to L2TP header */
135 optr = ptr = skb->data;
136
137 if (!pskb_may_pull(skb, 4))
138 goto discard;
139
140 session_id = ntohl(*((__be32 *) ptr));
141 ptr += 4;
142
143 /* RFC3931: L2TP/IP packets have the first 4 bytes containing
144	 * the session_id. If it is 0, the packet is an L2TP control
145 * frame and the session_id value can be discarded.
146 */
147 if (session_id == 0) {
148 __skb_pull(skb, 4);
149 goto pass_up;
150 }
151
152 /* Ok, this is a data packet. Lookup the session. */
153 session = l2tp_session_find(&init_net, NULL, session_id);
154 if (session == NULL)
155 goto discard;
156
157 tunnel = session->tunnel;
158 if (tunnel == NULL)
159 goto discard;
160
161 /* Trace packet contents, if enabled */
162 if (tunnel->debug & L2TP_MSG_DATA) {
163 length = min(32u, skb->len);
164 if (!pskb_may_pull(skb, length))
165 goto discard;
166
167 printk(KERN_DEBUG "%s: ip recv: ", tunnel->name);
168
169 offset = 0;
170 do {
171 printk(" %02X", ptr[offset]);
172 } while (++offset < length);
173
174 printk("\n");
175 }
176
177 l2tp_recv_common(session, skb, ptr, optr, 0, skb->len, tunnel->recv_payload_hook);
178
179 return 0;
180
181pass_up:
182 /* Get the tunnel_id from the L2TP header */
183 if (!pskb_may_pull(skb, 12))
184 goto discard;
185
186 if ((skb->data[0] & 0xc0) != 0xc0)
187 goto discard;
188
189 tunnel_id = ntohl(*(__be32 *) &skb->data[4]);
190 tunnel = l2tp_tunnel_find(&init_net, tunnel_id);
191 if (tunnel != NULL)
192 sk = tunnel->sock;
193 else {
194 struct iphdr *iph = (struct iphdr *) skb_network_header(skb);
195
196 read_lock_bh(&l2tp_ip_lock);
197 sk = __l2tp_ip_bind_lookup(&init_net, iph->daddr, 0, tunnel_id);
198 read_unlock_bh(&l2tp_ip_lock);
199 }
200
201 if (sk == NULL)
202 goto discard;
203
204 sock_hold(sk);
205
206 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
207 goto discard_put;
208
209 nf_reset(skb);
210
211 return sk_receive_skb(sk, skb, 1);
212
213discard_put:
214 sock_put(sk);
215
216discard:
217 kfree_skb(skb);
218 return 0;
219}
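/* Example (sketch, not part of this patch): the demultiplexing rule
 * implemented above, in isolation.  The first four bytes after the IP
 * header select the path:
 *
 *	u32 session_id = ntohl(*(__be32 *) skb->data);
 *	if (session_id == 0)
 *		the frame is a control message: pass it up to the
 *		tunnel socket via sk_receive_skb();
 *	else
 *		the frame is data: hand it to l2tp_recv_common().
 */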
220
221static int l2tp_ip_open(struct sock *sk)
222{
223 /* Prevent autobind. We don't have ports. */
224 inet_sk(sk)->inet_num = IPPROTO_L2TP;
225
226 write_lock_bh(&l2tp_ip_lock);
227 sk_add_node(sk, &l2tp_ip_table);
228 write_unlock_bh(&l2tp_ip_lock);
229
230 return 0;
231}
232
233static void l2tp_ip_close(struct sock *sk, long timeout)
234{
235 write_lock_bh(&l2tp_ip_lock);
236 hlist_del_init(&sk->sk_bind_node);
237 hlist_del_init(&sk->sk_node);
238 write_unlock_bh(&l2tp_ip_lock);
239 sk_common_release(sk);
240}
241
242static void l2tp_ip_destroy_sock(struct sock *sk)
243{
244 struct sk_buff *skb;
245
246 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
247 kfree_skb(skb);
248
249 sk_refcnt_debug_dec(sk);
250}
251
252static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
253{
254 struct inet_sock *inet = inet_sk(sk);
255 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
256 int ret = -EINVAL;
257 int chk_addr_ret;
258
259 ret = -EADDRINUSE;
260 read_lock_bh(&l2tp_ip_lock);
261 if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
262 goto out_in_use;
263
264 read_unlock_bh(&l2tp_ip_lock);
265
266 lock_sock(sk);
267 if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
268 goto out;
269
270 chk_addr_ret = inet_addr_type(&init_net, addr->l2tp_addr.s_addr);
271 ret = -EADDRNOTAVAIL;
272 if (addr->l2tp_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
273 chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
274 goto out;
275
276 inet->inet_rcv_saddr = inet->inet_saddr = addr->l2tp_addr.s_addr;
277 if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
278 inet->inet_saddr = 0; /* Use device */
279 sk_dst_reset(sk);
280
281 l2tp_ip_sk(sk)->conn_id = addr->l2tp_conn_id;
282
283 write_lock_bh(&l2tp_ip_lock);
284 sk_add_bind_node(sk, &l2tp_ip_bind_table);
285 sk_del_node_init(sk);
286 write_unlock_bh(&l2tp_ip_lock);
287 ret = 0;
288out:
289 release_sock(sk);
290
291 return ret;
292
293out_in_use:
294 read_unlock_bh(&l2tp_ip_lock);
295
296 return ret;
297}
298
299static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
300{
301 int rc;
302 struct inet_sock *inet = inet_sk(sk);
303 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
304 struct rtable *rt;
305 __be32 saddr;
306 int oif;
307
308 rc = -EINVAL;
309 if (addr_len < sizeof(*lsa))
310 goto out;
311
312 rc = -EAFNOSUPPORT;
313 if (lsa->l2tp_family != AF_INET)
314 goto out;
315
316 sk_dst_reset(sk);
317
318 oif = sk->sk_bound_dev_if;
319 saddr = inet->inet_saddr;
320
321 rc = -EINVAL;
322 if (ipv4_is_multicast(lsa->l2tp_addr.s_addr))
323 goto out;
324
325 rc = ip_route_connect(&rt, lsa->l2tp_addr.s_addr, saddr,
326 RT_CONN_FLAGS(sk), oif,
327 IPPROTO_L2TP,
328 0, 0, sk, 1);
329 if (rc) {
330 if (rc == -ENETUNREACH)
331 IP_INC_STATS_BH(&init_net, IPSTATS_MIB_OUTNOROUTES);
332 goto out;
333 }
334
335 rc = -ENETUNREACH;
336 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
337 ip_rt_put(rt);
338 goto out;
339 }
340
341 l2tp_ip_sk(sk)->peer_conn_id = lsa->l2tp_conn_id;
342
343 if (!inet->inet_saddr)
344 inet->inet_saddr = rt->rt_src;
345 if (!inet->inet_rcv_saddr)
346 inet->inet_rcv_saddr = rt->rt_src;
347 inet->inet_daddr = rt->rt_dst;
348 sk->sk_state = TCP_ESTABLISHED;
349 inet->inet_id = jiffies;
350
351 sk_dst_set(sk, &rt->u.dst);
352
353 write_lock_bh(&l2tp_ip_lock);
354 hlist_del_init(&sk->sk_bind_node);
355 sk_add_bind_node(sk, &l2tp_ip_bind_table);
356 write_unlock_bh(&l2tp_ip_lock);
357
358 rc = 0;
359out:
360 return rc;
361}
362
363static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
364 int *uaddr_len, int peer)
365{
366 struct sock *sk = sock->sk;
367 struct inet_sock *inet = inet_sk(sk);
368 struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
369 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *)uaddr;
370
371 memset(lsa, 0, sizeof(*lsa));
372 lsa->l2tp_family = AF_INET;
373 if (peer) {
374 if (!inet->inet_dport)
375 return -ENOTCONN;
376 lsa->l2tp_conn_id = lsk->peer_conn_id;
377 lsa->l2tp_addr.s_addr = inet->inet_daddr;
378 } else {
379 __be32 addr = inet->inet_rcv_saddr;
380 if (!addr)
381 addr = inet->inet_saddr;
382 lsa->l2tp_conn_id = lsk->conn_id;
383 lsa->l2tp_addr.s_addr = addr;
384 }
385 *uaddr_len = sizeof(*lsa);
386 return 0;
387}
388
389static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
390{
391 int rc;
392
393 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
394 goto drop;
395
396 nf_reset(skb);
397
398 /* Charge it to the socket, dropping if the queue is full. */
399 rc = sock_queue_rcv_skb(sk, skb);
400 if (rc < 0)
401 goto drop;
402
403 return 0;
404
405drop:
406 IP_INC_STATS(&init_net, IPSTATS_MIB_INDISCARDS);
407 kfree_skb(skb);
408 return -1;
409}
410
411/* Userspace will call sendmsg() on the tunnel socket to send L2TP
412 * control frames.
413 */
414static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len)
415{
416 struct sk_buff *skb;
417 int rc;
418 struct l2tp_ip_sock *lsa = l2tp_ip_sk(sk);
419 struct inet_sock *inet = inet_sk(sk);
420 struct ip_options *opt = inet->opt;
421 struct rtable *rt = NULL;
422 int connected = 0;
423 __be32 daddr;
424
425 if (sock_flag(sk, SOCK_DEAD))
426 return -ENOTCONN;
427
428 /* Get and verify the address. */
429 if (msg->msg_name) {
430 struct sockaddr_l2tpip *lip = (struct sockaddr_l2tpip *) msg->msg_name;
431 if (msg->msg_namelen < sizeof(*lip))
432 return -EINVAL;
433
434 if (lip->l2tp_family != AF_INET) {
435 if (lip->l2tp_family != AF_UNSPEC)
436 return -EAFNOSUPPORT;
437 }
438
439 daddr = lip->l2tp_addr.s_addr;
440 } else {
441 if (sk->sk_state != TCP_ESTABLISHED)
442 return -EDESTADDRREQ;
443
444 daddr = inet->inet_daddr;
445 connected = 1;
446 }
447
448 /* Allocate a socket buffer */
449 rc = -ENOMEM;
450 skb = sock_wmalloc(sk, 2 + NET_SKB_PAD + sizeof(struct iphdr) +
451 4 + len, 0, GFP_KERNEL);
452 if (!skb)
453 goto error;
454
455 /* Reserve space for headers, putting IP header on 4-byte boundary. */
456 skb_reserve(skb, 2 + NET_SKB_PAD);
457 skb_reset_network_header(skb);
458 skb_reserve(skb, sizeof(struct iphdr));
459 skb_reset_transport_header(skb);
460
461 /* Insert 0 session_id */
462 *((__be32 *) skb_put(skb, 4)) = 0;
463
464 /* Copy user data into skb */
465 rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
466 if (rc < 0) {
467 kfree_skb(skb);
468 goto error;
469 }
470
471 if (connected)
472 rt = (struct rtable *) __sk_dst_check(sk, 0);
473
474 if (rt == NULL) {
475 /* Use correct destination address if we have options. */
476 if (opt && opt->srr)
477 daddr = opt->faddr;
478
479 {
480 struct flowi fl = { .oif = sk->sk_bound_dev_if,
481 .nl_u = { .ip4_u = {
482 .daddr = daddr,
483 .saddr = inet->inet_saddr,
484 .tos = RT_CONN_FLAGS(sk) } },
485 .proto = sk->sk_protocol,
486 .flags = inet_sk_flowi_flags(sk),
487 .uli_u = { .ports = {
488 .sport = inet->inet_sport,
489 .dport = inet->inet_dport } } };
490
491		/* If this fails, the retransmit mechanism of the transport
492		 * layer will keep trying until the route appears or the
493		 * connection times out.
494 */
495 security_sk_classify_flow(sk, &fl);
496 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
497 goto no_route;
498 }
499 sk_setup_caps(sk, &rt->u.dst);
500 }
501 skb_dst_set(skb, dst_clone(&rt->u.dst));
502
503 /* Queue the packet to IP for output */
504 rc = ip_queue_xmit(skb);
505
506error:
507 /* Update stats */
508 if (rc >= 0) {
509 lsa->tx_packets++;
510 lsa->tx_bytes += len;
511 rc = len;
512 } else {
513 lsa->tx_errors++;
514 }
515
516 return rc;
517
518no_route:
519 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
520 kfree_skb(skb);
521 return -EHOSTUNREACH;
522}
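/* Example (hypothetical userspace sketch, not part of this patch):
 * sending one control frame over an L2TP/IP socket.  The
 * sockaddr_l2tpip layout is assumed from its use in this file; the
 * kernel prepends the zero session id in l2tp_ip_sendmsg() above.
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_L2TP);
 *	struct sockaddr_l2tpip peer = {
 *		.l2tp_family  = AF_INET,
 *		.l2tp_addr    = { .s_addr = peer_ip },
 *		.l2tp_conn_id = peer_tunnel_id,
 *	};
 *	connect(fd, (struct sockaddr *) &peer, sizeof(peer));
 *	send(fd, ctrl_frame, ctrl_len, 0);
 */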
523
524static int l2tp_ip_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
525 size_t len, int noblock, int flags, int *addr_len)
526{
527 struct inet_sock *inet = inet_sk(sk);
528 struct l2tp_ip_sock *lsk = l2tp_ip_sk(sk);
529 size_t copied = 0;
530 int err = -EOPNOTSUPP;
531 struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
532 struct sk_buff *skb;
533
534 if (flags & MSG_OOB)
535 goto out;
536
537 if (addr_len)
538 *addr_len = sizeof(*sin);
539
540 skb = skb_recv_datagram(sk, flags, noblock, &err);
541 if (!skb)
542 goto out;
543
544 copied = skb->len;
545 if (len < copied) {
546 msg->msg_flags |= MSG_TRUNC;
547 copied = len;
548 }
549
550 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
551 if (err)
552 goto done;
553
554 sock_recv_timestamp(msg, sk, skb);
555
556 /* Copy the address. */
557 if (sin) {
558 sin->sin_family = AF_INET;
559 sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
560 sin->sin_port = 0;
561 memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
562 }
563 if (inet->cmsg_flags)
564 ip_cmsg_recv(msg, skb);
565 if (flags & MSG_TRUNC)
566 copied = skb->len;
567done:
568 skb_free_datagram(sk, skb);
569out:
570 if (err) {
571 lsk->rx_errors++;
572 return err;
573 }
574
575 lsk->rx_packets++;
576 lsk->rx_bytes += copied;
577
578 return copied;
579}
580
581struct proto l2tp_ip_prot = {
582 .name = "L2TP/IP",
583 .owner = THIS_MODULE,
584 .init = l2tp_ip_open,
585 .close = l2tp_ip_close,
586 .bind = l2tp_ip_bind,
587 .connect = l2tp_ip_connect,
588 .disconnect = udp_disconnect,
589 .ioctl = udp_ioctl,
590 .destroy = l2tp_ip_destroy_sock,
591 .setsockopt = ip_setsockopt,
592 .getsockopt = ip_getsockopt,
593 .sendmsg = l2tp_ip_sendmsg,
594 .recvmsg = l2tp_ip_recvmsg,
595 .backlog_rcv = l2tp_ip_backlog_recv,
596 .hash = inet_hash,
597 .unhash = inet_unhash,
598 .obj_size = sizeof(struct l2tp_ip_sock),
599#ifdef CONFIG_COMPAT
600 .compat_setsockopt = compat_ip_setsockopt,
601 .compat_getsockopt = compat_ip_getsockopt,
602#endif
603};
604
605static const struct proto_ops l2tp_ip_ops = {
606 .family = PF_INET,
607 .owner = THIS_MODULE,
608 .release = inet_release,
609 .bind = inet_bind,
610 .connect = inet_dgram_connect,
611 .socketpair = sock_no_socketpair,
612 .accept = sock_no_accept,
613 .getname = l2tp_ip_getname,
614 .poll = datagram_poll,
615 .ioctl = inet_ioctl,
616 .listen = sock_no_listen,
617 .shutdown = inet_shutdown,
618 .setsockopt = sock_common_setsockopt,
619 .getsockopt = sock_common_getsockopt,
620 .sendmsg = inet_sendmsg,
621 .recvmsg = sock_common_recvmsg,
622 .mmap = sock_no_mmap,
623 .sendpage = sock_no_sendpage,
624#ifdef CONFIG_COMPAT
625 .compat_setsockopt = compat_sock_common_setsockopt,
626 .compat_getsockopt = compat_sock_common_getsockopt,
627#endif
628};
629
630static struct inet_protosw l2tp_ip_protosw = {
631 .type = SOCK_DGRAM,
632 .protocol = IPPROTO_L2TP,
633 .prot = &l2tp_ip_prot,
634 .ops = &l2tp_ip_ops,
635 .no_check = 0,
636};
637
638static struct net_protocol l2tp_ip_protocol __read_mostly = {
639 .handler = l2tp_ip_recv,
640};
641
642static int __init l2tp_ip_init(void)
643{
644 int err;
645
646 printk(KERN_INFO "L2TP IP encapsulation support (L2TPv3)\n");
647
648 err = proto_register(&l2tp_ip_prot, 1);
649 if (err != 0)
650 goto out;
651
652 err = inet_add_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
653 if (err)
654 goto out1;
655
656 inet_register_protosw(&l2tp_ip_protosw);
657 return 0;
658
659out1:
660 proto_unregister(&l2tp_ip_prot);
661out:
662 return err;
663}
664
665static void __exit l2tp_ip_exit(void)
666{
667 inet_unregister_protosw(&l2tp_ip_protosw);
668 inet_del_protocol(&l2tp_ip_protocol, IPPROTO_L2TP);
669 proto_unregister(&l2tp_ip_prot);
670}
671
672module_init(l2tp_ip_init);
673module_exit(l2tp_ip_exit);
674
675MODULE_LICENSE("GPL");
676MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
677MODULE_DESCRIPTION("L2TP over IP");
678MODULE_VERSION("1.0");
679MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
new file mode 100644
index 000000000000..4c1e540732d7
--- /dev/null
+++ b/net/l2tp/l2tp_netlink.c
@@ -0,0 +1,840 @@
1/*
2 * L2TP netlink layer, for management
3 *
4 * Copyright (c) 2008,2009,2010 Katalix Systems Ltd
5 *
6 * Partly based on the IrDA netlink implementation
7 * (see net/irda/irnetlink.c) which is:
8 * Copyright (c) 2007 Samuel Ortiz <samuel@sortiz.org>
9 * which is in turn partly based on the wireless netlink code:
10 * Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <net/sock.h>
18#include <net/genetlink.h>
19#include <net/udp.h>
20#include <linux/in.h>
21#include <linux/udp.h>
22#include <linux/socket.h>
23#include <linux/module.h>
24#include <linux/list.h>
25#include <net/net_namespace.h>
26
27#include <linux/l2tp.h>
28
29#include "l2tp_core.h"
30
31
32static struct genl_family l2tp_nl_family = {
33 .id = GENL_ID_GENERATE,
34 .name = L2TP_GENL_NAME,
35 .version = L2TP_GENL_VERSION,
36 .hdrsize = 0,
37 .maxattr = L2TP_ATTR_MAX,
38};
39
40/* Accessed under genl lock */
41static const struct l2tp_nl_cmd_ops *l2tp_nl_cmd_ops[__L2TP_PWTYPE_MAX];
42
43static struct l2tp_session *l2tp_nl_session_find(struct genl_info *info)
44{
45 u32 tunnel_id;
46 u32 session_id;
47 char *ifname;
48 struct l2tp_tunnel *tunnel;
49 struct l2tp_session *session = NULL;
50 struct net *net = genl_info_net(info);
51
52 if (info->attrs[L2TP_ATTR_IFNAME]) {
53 ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
54 session = l2tp_session_find_by_ifname(net, ifname);
55 } else if ((info->attrs[L2TP_ATTR_SESSION_ID]) &&
56 (info->attrs[L2TP_ATTR_CONN_ID])) {
57 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
58 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
59 tunnel = l2tp_tunnel_find(net, tunnel_id);
60 if (tunnel)
61 session = l2tp_session_find(net, tunnel, session_id);
62 }
63
64 return session;
65}
66
67static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
68{
69 struct sk_buff *msg;
70 void *hdr;
71 int ret = -ENOBUFS;
72
73 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
74 if (!msg) {
75 ret = -ENOMEM;
76 goto out;
77 }
78
79 hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
80 &l2tp_nl_family, 0, L2TP_CMD_NOOP);
81	if (!hdr) {
82		ret = -EMSGSIZE;
83 goto err_out;
84 }
85
86 genlmsg_end(msg, hdr);
87
88 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
89
90err_out:
91 nlmsg_free(msg);
92
93out:
94 return ret;
95}
96
97static int l2tp_nl_cmd_tunnel_create(struct sk_buff *skb, struct genl_info *info)
98{
99 u32 tunnel_id;
100 u32 peer_tunnel_id;
101 int proto_version;
102 int fd;
103 int ret = 0;
104 struct l2tp_tunnel_cfg cfg = { 0, };
105 struct l2tp_tunnel *tunnel;
106 struct net *net = genl_info_net(info);
107
108 if (!info->attrs[L2TP_ATTR_CONN_ID]) {
109 ret = -EINVAL;
110 goto out;
111 }
112 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
113
114 if (!info->attrs[L2TP_ATTR_PEER_CONN_ID]) {
115 ret = -EINVAL;
116 goto out;
117 }
118 peer_tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_CONN_ID]);
119
120 if (!info->attrs[L2TP_ATTR_PROTO_VERSION]) {
121 ret = -EINVAL;
122 goto out;
123 }
124 proto_version = nla_get_u8(info->attrs[L2TP_ATTR_PROTO_VERSION]);
125
126 if (!info->attrs[L2TP_ATTR_ENCAP_TYPE]) {
127 ret = -EINVAL;
128 goto out;
129 }
130 cfg.encap = nla_get_u16(info->attrs[L2TP_ATTR_ENCAP_TYPE]);
131
132 fd = -1;
133 if (info->attrs[L2TP_ATTR_FD]) {
134 fd = nla_get_u32(info->attrs[L2TP_ATTR_FD]);
135 } else {
136 if (info->attrs[L2TP_ATTR_IP_SADDR])
137 cfg.local_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_SADDR]);
138 if (info->attrs[L2TP_ATTR_IP_DADDR])
139 cfg.peer_ip.s_addr = nla_get_be32(info->attrs[L2TP_ATTR_IP_DADDR]);
140 if (info->attrs[L2TP_ATTR_UDP_SPORT])
141 cfg.local_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_SPORT]);
142 if (info->attrs[L2TP_ATTR_UDP_DPORT])
143 cfg.peer_udp_port = nla_get_u16(info->attrs[L2TP_ATTR_UDP_DPORT]);
144 if (info->attrs[L2TP_ATTR_UDP_CSUM])
145 cfg.use_udp_checksums = nla_get_flag(info->attrs[L2TP_ATTR_UDP_CSUM]);
146 }
147
148 if (info->attrs[L2TP_ATTR_DEBUG])
149 cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
150
151 tunnel = l2tp_tunnel_find(net, tunnel_id);
152 if (tunnel != NULL) {
153 ret = -EEXIST;
154 goto out;
155 }
156
157 ret = -EINVAL;
158 switch (cfg.encap) {
159 case L2TP_ENCAPTYPE_UDP:
160 case L2TP_ENCAPTYPE_IP:
161 ret = l2tp_tunnel_create(net, fd, proto_version, tunnel_id,
162 peer_tunnel_id, &cfg, &tunnel);
163 break;
164 }
165
166out:
167 return ret;
168}
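/* Example (sketch): the attribute set a TUNNEL_CREATE request must
 * carry, as validated above.
 *
 *	required: L2TP_ATTR_CONN_ID, L2TP_ATTR_PEER_CONN_ID,
 *		  L2TP_ATTR_PROTO_VERSION, L2TP_ATTR_ENCAP_TYPE
 *	optional: L2TP_ATTR_FD (use an existing socket), or
 *		  L2TP_ATTR_IP_SADDR/L2TP_ATTR_IP_DADDR plus
 *		  L2TP_ATTR_UDP_SPORT/L2TP_ATTR_UDP_DPORT and
 *		  L2TP_ATTR_UDP_CSUM (kernel opens the socket);
 *		  L2TP_ATTR_DEBUG
 */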
169
170static int l2tp_nl_cmd_tunnel_delete(struct sk_buff *skb, struct genl_info *info)
171{
172 struct l2tp_tunnel *tunnel;
173 u32 tunnel_id;
174 int ret = 0;
175 struct net *net = genl_info_net(info);
176
177 if (!info->attrs[L2TP_ATTR_CONN_ID]) {
178 ret = -EINVAL;
179 goto out;
180 }
181 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
182
183 tunnel = l2tp_tunnel_find(net, tunnel_id);
184 if (tunnel == NULL) {
185 ret = -ENODEV;
186 goto out;
187 }
188
189 (void) l2tp_tunnel_delete(tunnel);
190
191out:
192 return ret;
193}
194
195static int l2tp_nl_cmd_tunnel_modify(struct sk_buff *skb, struct genl_info *info)
196{
197 struct l2tp_tunnel *tunnel;
198 u32 tunnel_id;
199 int ret = 0;
200 struct net *net = genl_info_net(info);
201
202 if (!info->attrs[L2TP_ATTR_CONN_ID]) {
203 ret = -EINVAL;
204 goto out;
205 }
206 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
207
208 tunnel = l2tp_tunnel_find(net, tunnel_id);
209 if (tunnel == NULL) {
210 ret = -ENODEV;
211 goto out;
212 }
213
214 if (info->attrs[L2TP_ATTR_DEBUG])
215 tunnel->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
216
217out:
218 return ret;
219}
220
221static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
222 struct l2tp_tunnel *tunnel)
223{
224 void *hdr;
225 struct nlattr *nest;
226 struct sock *sk = NULL;
227 struct inet_sock *inet;
228
229 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
230 L2TP_CMD_TUNNEL_GET);
231 if (!hdr)
232 return -EMSGSIZE;
233
234 NLA_PUT_U8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version);
235 NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
236 NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
237 NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, tunnel->debug);
238 NLA_PUT_U16(skb, L2TP_ATTR_ENCAP_TYPE, tunnel->encap);
239
240 nest = nla_nest_start(skb, L2TP_ATTR_STATS);
241 if (nest == NULL)
242 goto nla_put_failure;
243
244 NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, tunnel->stats.tx_packets);
245 NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, tunnel->stats.tx_bytes);
246 NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, tunnel->stats.tx_errors);
247 NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, tunnel->stats.rx_packets);
248 NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, tunnel->stats.rx_bytes);
249 NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, tunnel->stats.rx_seq_discards);
250 NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, tunnel->stats.rx_oos_packets);
251 NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, tunnel->stats.rx_errors);
252 nla_nest_end(skb, nest);
253
254 sk = tunnel->sock;
255 if (!sk)
256 goto out;
257
258 inet = inet_sk(sk);
259
260 switch (tunnel->encap) {
261 case L2TP_ENCAPTYPE_UDP:
262 NLA_PUT_U16(skb, L2TP_ATTR_UDP_SPORT, ntohs(inet->inet_sport));
263 NLA_PUT_U16(skb, L2TP_ATTR_UDP_DPORT, ntohs(inet->inet_dport));
264 NLA_PUT_U8(skb, L2TP_ATTR_UDP_CSUM, (sk->sk_no_check != UDP_CSUM_NOXMIT));
265 /* fall through */
266 case L2TP_ENCAPTYPE_IP:
267 NLA_PUT_BE32(skb, L2TP_ATTR_IP_SADDR, inet->inet_saddr);
268 NLA_PUT_BE32(skb, L2TP_ATTR_IP_DADDR, inet->inet_daddr);
269 break;
270 }
271
272out:
273 return genlmsg_end(skb, hdr);
274
275nla_put_failure:
276 genlmsg_cancel(skb, hdr);
277 return -1;
278}
279
280static int l2tp_nl_cmd_tunnel_get(struct sk_buff *skb, struct genl_info *info)
281{
282 struct l2tp_tunnel *tunnel;
283 struct sk_buff *msg;
284 u32 tunnel_id;
285 int ret = -ENOBUFS;
286 struct net *net = genl_info_net(info);
287
288 if (!info->attrs[L2TP_ATTR_CONN_ID]) {
289 ret = -EINVAL;
290 goto out;
291 }
292
293 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
294
295 tunnel = l2tp_tunnel_find(net, tunnel_id);
296 if (tunnel == NULL) {
297 ret = -ENODEV;
298 goto out;
299 }
300
301 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
302 if (!msg) {
303 ret = -ENOMEM;
304 goto out;
305 }
306
307 ret = l2tp_nl_tunnel_send(msg, info->snd_pid, info->snd_seq,
308 NLM_F_ACK, tunnel);
309 if (ret < 0)
310 goto err_out;
311
312 return genlmsg_unicast(net, msg, info->snd_pid);
313
314err_out:
315 nlmsg_free(msg);
316
317out:
318 return ret;
319}
320
321static int l2tp_nl_cmd_tunnel_dump(struct sk_buff *skb, struct netlink_callback *cb)
322{
323 int ti = cb->args[0];
324 struct l2tp_tunnel *tunnel;
325 struct net *net = sock_net(skb->sk);
326
327 for (;;) {
328 tunnel = l2tp_tunnel_find_nth(net, ti);
329 if (tunnel == NULL)
330 goto out;
331
332 if (l2tp_nl_tunnel_send(skb, NETLINK_CB(cb->skb).pid,
333 cb->nlh->nlmsg_seq, NLM_F_MULTI,
334 tunnel) <= 0)
335 goto out;
336
337 ti++;
338 }
339
340out:
341 cb->args[0] = ti;
342
343 return skb->len;
344}
345
346static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *info)
347{
348 u32 tunnel_id = 0;
349 u32 session_id;
350 u32 peer_session_id;
351 int ret = 0;
352 struct l2tp_tunnel *tunnel;
353 struct l2tp_session *session;
354 struct l2tp_session_cfg cfg = { 0, };
355 struct net *net = genl_info_net(info);
356
357 if (!info->attrs[L2TP_ATTR_CONN_ID]) {
358 ret = -EINVAL;
359 goto out;
360 }
361 tunnel_id = nla_get_u32(info->attrs[L2TP_ATTR_CONN_ID]);
362 tunnel = l2tp_tunnel_find(net, tunnel_id);
363 if (!tunnel) {
364 ret = -ENODEV;
365 goto out;
366 }
367
368 if (!info->attrs[L2TP_ATTR_SESSION_ID]) {
369 ret = -EINVAL;
370 goto out;
371 }
372 session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]);
373 session = l2tp_session_find(net, tunnel, session_id);
374 if (session) {
375 ret = -EEXIST;
376 goto out;
377 }
378
379 if (!info->attrs[L2TP_ATTR_PEER_SESSION_ID]) {
380 ret = -EINVAL;
381 goto out;
382 }
383 peer_session_id = nla_get_u32(info->attrs[L2TP_ATTR_PEER_SESSION_ID]);
384
385 if (!info->attrs[L2TP_ATTR_PW_TYPE]) {
386 ret = -EINVAL;
387 goto out;
388 }
389 cfg.pw_type = nla_get_u16(info->attrs[L2TP_ATTR_PW_TYPE]);
390 if (cfg.pw_type >= __L2TP_PWTYPE_MAX) {
391 ret = -EINVAL;
392 goto out;
393 }
394
395 if (tunnel->version > 2) {
396 if (info->attrs[L2TP_ATTR_OFFSET])
397 cfg.offset = nla_get_u16(info->attrs[L2TP_ATTR_OFFSET]);
398
399 if (info->attrs[L2TP_ATTR_DATA_SEQ])
400 cfg.data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
401
402 cfg.l2specific_type = L2TP_L2SPECTYPE_DEFAULT;
403 if (info->attrs[L2TP_ATTR_L2SPEC_TYPE])
404 cfg.l2specific_type = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_TYPE]);
405
406 cfg.l2specific_len = 4;
407 if (info->attrs[L2TP_ATTR_L2SPEC_LEN])
408 cfg.l2specific_len = nla_get_u8(info->attrs[L2TP_ATTR_L2SPEC_LEN]);
409
410 if (info->attrs[L2TP_ATTR_COOKIE]) {
411 u16 len = nla_len(info->attrs[L2TP_ATTR_COOKIE]);
412 if (len > 8) {
413 ret = -EINVAL;
414 goto out;
415 }
416 cfg.cookie_len = len;
417 memcpy(&cfg.cookie[0], nla_data(info->attrs[L2TP_ATTR_COOKIE]), len);
418 }
419 if (info->attrs[L2TP_ATTR_PEER_COOKIE]) {
420 u16 len = nla_len(info->attrs[L2TP_ATTR_PEER_COOKIE]);
421 if (len > 8) {
422 ret = -EINVAL;
423 goto out;
424 }
425 cfg.peer_cookie_len = len;
426 memcpy(&cfg.peer_cookie[0], nla_data(info->attrs[L2TP_ATTR_PEER_COOKIE]), len);
427 }
428 if (info->attrs[L2TP_ATTR_IFNAME])
429 cfg.ifname = nla_data(info->attrs[L2TP_ATTR_IFNAME]);
430
431 if (info->attrs[L2TP_ATTR_VLAN_ID])
432 cfg.vlan_id = nla_get_u16(info->attrs[L2TP_ATTR_VLAN_ID]);
433 }
434
435 if (info->attrs[L2TP_ATTR_DEBUG])
436 cfg.debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
437
438 if (info->attrs[L2TP_ATTR_RECV_SEQ])
439 cfg.recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
440
441 if (info->attrs[L2TP_ATTR_SEND_SEQ])
442 cfg.send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
443
444 if (info->attrs[L2TP_ATTR_LNS_MODE])
445 cfg.lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
446
447 if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
448 cfg.reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
449
450 if (info->attrs[L2TP_ATTR_MTU])
451 cfg.mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);
452
453 if (info->attrs[L2TP_ATTR_MRU])
454 cfg.mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
455
456 if ((l2tp_nl_cmd_ops[cfg.pw_type] == NULL) ||
457 (l2tp_nl_cmd_ops[cfg.pw_type]->session_create == NULL)) {
458 ret = -EPROTONOSUPPORT;
459 goto out;
460 }
461
462 /* Check that pseudowire-specific params are present */
463 switch (cfg.pw_type) {
464 case L2TP_PWTYPE_NONE:
465 break;
466 case L2TP_PWTYPE_ETH_VLAN:
467 if (!info->attrs[L2TP_ATTR_VLAN_ID]) {
468 ret = -EINVAL;
469 goto out;
470 }
471 break;
472 case L2TP_PWTYPE_ETH:
473 break;
474 case L2TP_PWTYPE_PPP:
475 case L2TP_PWTYPE_PPP_AC:
476 break;
477 case L2TP_PWTYPE_IP:
478 default:
479 ret = -EPROTONOSUPPORT;
480 break;
481 }
482
483 ret = -EPROTONOSUPPORT;
484 if (l2tp_nl_cmd_ops[cfg.pw_type]->session_create)
485 ret = (*l2tp_nl_cmd_ops[cfg.pw_type]->session_create)(net, tunnel_id,
486 session_id, peer_session_id, &cfg);
487
488out:
489 return ret;
490}
491
492static int l2tp_nl_cmd_session_delete(struct sk_buff *skb, struct genl_info *info)
493{
494 int ret = 0;
495 struct l2tp_session *session;
496 u16 pw_type;
497
498 session = l2tp_nl_session_find(info);
499 if (session == NULL) {
500 ret = -ENODEV;
501 goto out;
502 }
503
504 pw_type = session->pwtype;
505 if (pw_type < __L2TP_PWTYPE_MAX)
506 if (l2tp_nl_cmd_ops[pw_type] && l2tp_nl_cmd_ops[pw_type]->session_delete)
507 ret = (*l2tp_nl_cmd_ops[pw_type]->session_delete)(session);
508
509out:
510 return ret;
511}
512
513static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *info)
514{
515 int ret = 0;
516 struct l2tp_session *session;
517
518 session = l2tp_nl_session_find(info);
519 if (session == NULL) {
520 ret = -ENODEV;
521 goto out;
522 }
523
524 if (info->attrs[L2TP_ATTR_DEBUG])
525 session->debug = nla_get_u32(info->attrs[L2TP_ATTR_DEBUG]);
526
527 if (info->attrs[L2TP_ATTR_DATA_SEQ])
528 session->data_seq = nla_get_u8(info->attrs[L2TP_ATTR_DATA_SEQ]);
529
530 if (info->attrs[L2TP_ATTR_RECV_SEQ])
531 session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
532
533 if (info->attrs[L2TP_ATTR_SEND_SEQ])
534 session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
535
536 if (info->attrs[L2TP_ATTR_LNS_MODE])
537 session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
538
539 if (info->attrs[L2TP_ATTR_RECV_TIMEOUT])
540 session->reorder_timeout = nla_get_msecs(info->attrs[L2TP_ATTR_RECV_TIMEOUT]);
541
542 if (info->attrs[L2TP_ATTR_MTU])
543 session->mtu = nla_get_u16(info->attrs[L2TP_ATTR_MTU]);
544
545 if (info->attrs[L2TP_ATTR_MRU])
546 session->mru = nla_get_u16(info->attrs[L2TP_ATTR_MRU]);
547
548out:
549 return ret;
550}
551
552static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
553 struct l2tp_session *session)
554{
555 void *hdr;
556 struct nlattr *nest;
557 struct l2tp_tunnel *tunnel = session->tunnel;
558 struct sock *sk = NULL;
559
560 sk = tunnel->sock;
561
562 hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
563 if (!hdr)
564 return -EMSGSIZE;
565
566 NLA_PUT_U32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id);
567 NLA_PUT_U32(skb, L2TP_ATTR_SESSION_ID, session->session_id);
568 NLA_PUT_U32(skb, L2TP_ATTR_PEER_CONN_ID, tunnel->peer_tunnel_id);
569 NLA_PUT_U32(skb, L2TP_ATTR_PEER_SESSION_ID, session->peer_session_id);
570 NLA_PUT_U32(skb, L2TP_ATTR_DEBUG, session->debug);
571 NLA_PUT_U16(skb, L2TP_ATTR_PW_TYPE, session->pwtype);
572 NLA_PUT_U16(skb, L2TP_ATTR_MTU, session->mtu);
573 if (session->mru)
574 NLA_PUT_U16(skb, L2TP_ATTR_MRU, session->mru);
575
576 if (session->ifname && session->ifname[0])
577 NLA_PUT_STRING(skb, L2TP_ATTR_IFNAME, session->ifname);
578 if (session->cookie_len)
579 NLA_PUT(skb, L2TP_ATTR_COOKIE, session->cookie_len, &session->cookie[0]);
580 if (session->peer_cookie_len)
581 NLA_PUT(skb, L2TP_ATTR_PEER_COOKIE, session->peer_cookie_len, &session->peer_cookie[0]);
582 NLA_PUT_U8(skb, L2TP_ATTR_RECV_SEQ, session->recv_seq);
583 NLA_PUT_U8(skb, L2TP_ATTR_SEND_SEQ, session->send_seq);
584 NLA_PUT_U8(skb, L2TP_ATTR_LNS_MODE, session->lns_mode);
585#ifdef CONFIG_XFRM
586 if ((sk) && (sk->sk_policy[0] || sk->sk_policy[1]))
587 NLA_PUT_U8(skb, L2TP_ATTR_USING_IPSEC, 1);
588#endif
589 if (session->reorder_timeout)
590 NLA_PUT_MSECS(skb, L2TP_ATTR_RECV_TIMEOUT, session->reorder_timeout);
591
592 nest = nla_nest_start(skb, L2TP_ATTR_STATS);
593 if (nest == NULL)
594 goto nla_put_failure;
595 NLA_PUT_U64(skb, L2TP_ATTR_TX_PACKETS, session->stats.tx_packets);
596 NLA_PUT_U64(skb, L2TP_ATTR_TX_BYTES, session->stats.tx_bytes);
597 NLA_PUT_U64(skb, L2TP_ATTR_TX_ERRORS, session->stats.tx_errors);
598 NLA_PUT_U64(skb, L2TP_ATTR_RX_PACKETS, session->stats.rx_packets);
599 NLA_PUT_U64(skb, L2TP_ATTR_RX_BYTES, session->stats.rx_bytes);
600 NLA_PUT_U64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, session->stats.rx_seq_discards);
601 NLA_PUT_U64(skb, L2TP_ATTR_RX_OOS_PACKETS, session->stats.rx_oos_packets);
602 NLA_PUT_U64(skb, L2TP_ATTR_RX_ERRORS, session->stats.rx_errors);
603 nla_nest_end(skb, nest);
604
605 return genlmsg_end(skb, hdr);
606
607 nla_put_failure:
608 genlmsg_cancel(skb, hdr);
609 return -1;
610}
611
612static int l2tp_nl_cmd_session_get(struct sk_buff *skb, struct genl_info *info)
613{
614 struct l2tp_session *session;
615 struct sk_buff *msg;
616 int ret;
617
618 session = l2tp_nl_session_find(info);
619 if (session == NULL) {
620 ret = -ENODEV;
621 goto out;
622 }
623
624 msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
625 if (!msg) {
626 ret = -ENOMEM;
627 goto out;
628 }
629
630 ret = l2tp_nl_session_send(msg, info->snd_pid, info->snd_seq,
631 0, session);
632 if (ret < 0)
633 goto err_out;
634
635 return genlmsg_unicast(genl_info_net(info), msg, info->snd_pid);
636
637err_out:
638 nlmsg_free(msg);
639
640out:
641 return ret;
642}
643
644static int l2tp_nl_cmd_session_dump(struct sk_buff *skb, struct netlink_callback *cb)
645{
646 struct net *net = sock_net(skb->sk);
647 struct l2tp_session *session;
648 struct l2tp_tunnel *tunnel = NULL;
649 int ti = cb->args[0];
650 int si = cb->args[1];
651
652 for (;;) {
653 if (tunnel == NULL) {
654 tunnel = l2tp_tunnel_find_nth(net, ti);
655 if (tunnel == NULL)
656 goto out;
657 }
658
659 session = l2tp_session_find_nth(tunnel, si);
660 if (session == NULL) {
661 ti++;
662 tunnel = NULL;
663 si = 0;
664 continue;
665 }
666
667 if (l2tp_nl_session_send(skb, NETLINK_CB(cb->skb).pid,
668 cb->nlh->nlmsg_seq, NLM_F_MULTI,
669 session) <= 0)
670 break;
671
672 si++;
673 }
674
675out:
676 cb->args[0] = ti;
677 cb->args[1] = si;
678
679 return skb->len;
680}
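/* Userspace drives this dump (a hypothetical usage note, not kernel code)
 * by sending L2TP_CMD_SESSION_GET with NLM_F_REQUEST | NLM_F_DUMP set and
 * no CONN_ID/SESSION_ID attributes; the (tunnel, session) cursor kept in
 * cb->args[0] and cb->args[1] above lets the dump resume across skbs.
 */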
681
682static struct nla_policy l2tp_nl_policy[L2TP_ATTR_MAX + 1] = {
683 [L2TP_ATTR_NONE] = { .type = NLA_UNSPEC, },
684 [L2TP_ATTR_PW_TYPE] = { .type = NLA_U16, },
685 [L2TP_ATTR_ENCAP_TYPE] = { .type = NLA_U16, },
686 [L2TP_ATTR_OFFSET] = { .type = NLA_U16, },
687 [L2TP_ATTR_DATA_SEQ] = { .type = NLA_U8, },
688 [L2TP_ATTR_L2SPEC_TYPE] = { .type = NLA_U8, },
689 [L2TP_ATTR_L2SPEC_LEN] = { .type = NLA_U8, },
690 [L2TP_ATTR_PROTO_VERSION] = { .type = NLA_U8, },
691 [L2TP_ATTR_CONN_ID] = { .type = NLA_U32, },
692 [L2TP_ATTR_PEER_CONN_ID] = { .type = NLA_U32, },
693 [L2TP_ATTR_SESSION_ID] = { .type = NLA_U32, },
694 [L2TP_ATTR_PEER_SESSION_ID] = { .type = NLA_U32, },
695 [L2TP_ATTR_UDP_CSUM] = { .type = NLA_U8, },
696 [L2TP_ATTR_VLAN_ID] = { .type = NLA_U16, },
697 [L2TP_ATTR_DEBUG] = { .type = NLA_U32, },
698 [L2TP_ATTR_RECV_SEQ] = { .type = NLA_U8, },
699 [L2TP_ATTR_SEND_SEQ] = { .type = NLA_U8, },
700 [L2TP_ATTR_LNS_MODE] = { .type = NLA_U8, },
701 [L2TP_ATTR_USING_IPSEC] = { .type = NLA_U8, },
702 [L2TP_ATTR_RECV_TIMEOUT] = { .type = NLA_MSECS, },
703 [L2TP_ATTR_FD] = { .type = NLA_U32, },
704 [L2TP_ATTR_IP_SADDR] = { .type = NLA_U32, },
705 [L2TP_ATTR_IP_DADDR] = { .type = NLA_U32, },
706 [L2TP_ATTR_UDP_SPORT] = { .type = NLA_U16, },
707 [L2TP_ATTR_UDP_DPORT] = { .type = NLA_U16, },
708 [L2TP_ATTR_MTU] = { .type = NLA_U16, },
709 [L2TP_ATTR_MRU] = { .type = NLA_U16, },
710 [L2TP_ATTR_STATS] = { .type = NLA_NESTED, },
711 [L2TP_ATTR_IFNAME] = {
712 .type = NLA_NUL_STRING,
713 .len = IFNAMSIZ - 1,
714 },
715 [L2TP_ATTR_COOKIE] = {
716 .type = NLA_BINARY,
717 .len = 8,
718 },
719 [L2TP_ATTR_PEER_COOKIE] = {
720 .type = NLA_BINARY,
721 .len = 8,
722 },
723};
724
725static struct genl_ops l2tp_nl_ops[] = {
726 {
727 .cmd = L2TP_CMD_NOOP,
728 .doit = l2tp_nl_cmd_noop,
729 .policy = l2tp_nl_policy,
730 /* can be retrieved by unprivileged users */
731 },
732 {
733 .cmd = L2TP_CMD_TUNNEL_CREATE,
734 .doit = l2tp_nl_cmd_tunnel_create,
735 .policy = l2tp_nl_policy,
736 .flags = GENL_ADMIN_PERM,
737 },
738 {
739 .cmd = L2TP_CMD_TUNNEL_DELETE,
740 .doit = l2tp_nl_cmd_tunnel_delete,
741 .policy = l2tp_nl_policy,
742 .flags = GENL_ADMIN_PERM,
743 },
744 {
745 .cmd = L2TP_CMD_TUNNEL_MODIFY,
746 .doit = l2tp_nl_cmd_tunnel_modify,
747 .policy = l2tp_nl_policy,
748 .flags = GENL_ADMIN_PERM,
749 },
750 {
751 .cmd = L2TP_CMD_TUNNEL_GET,
752 .doit = l2tp_nl_cmd_tunnel_get,
753 .dumpit = l2tp_nl_cmd_tunnel_dump,
754 .policy = l2tp_nl_policy,
755 .flags = GENL_ADMIN_PERM,
756 },
757 {
758 .cmd = L2TP_CMD_SESSION_CREATE,
759 .doit = l2tp_nl_cmd_session_create,
760 .policy = l2tp_nl_policy,
761 .flags = GENL_ADMIN_PERM,
762 },
763 {
764 .cmd = L2TP_CMD_SESSION_DELETE,
765 .doit = l2tp_nl_cmd_session_delete,
766 .policy = l2tp_nl_policy,
767 .flags = GENL_ADMIN_PERM,
768 },
769 {
770 .cmd = L2TP_CMD_SESSION_MODIFY,
771 .doit = l2tp_nl_cmd_session_modify,
772 .policy = l2tp_nl_policy,
773 .flags = GENL_ADMIN_PERM,
774 },
775 {
776 .cmd = L2TP_CMD_SESSION_GET,
777 .doit = l2tp_nl_cmd_session_get,
778 .dumpit = l2tp_nl_cmd_session_dump,
779 .policy = l2tp_nl_policy,
780 .flags = GENL_ADMIN_PERM,
781 },
782};
783
784int l2tp_nl_register_ops(enum l2tp_pwtype pw_type, const struct l2tp_nl_cmd_ops *ops)
785{
786 int ret;
787
788 ret = -EINVAL;
789 if (pw_type >= __L2TP_PWTYPE_MAX)
790 goto err;
791
792 genl_lock();
793 ret = -EBUSY;
794 if (l2tp_nl_cmd_ops[pw_type])
795 goto out;
796
797 l2tp_nl_cmd_ops[pw_type] = ops;
798 ret = 0;
799out:
800 genl_unlock();
801err:
802 return ret;
803}
804EXPORT_SYMBOL_GPL(l2tp_nl_register_ops);
805
806void l2tp_nl_unregister_ops(enum l2tp_pwtype pw_type)
807{
808 if (pw_type < __L2TP_PWTYPE_MAX) {
809 genl_lock();
810 l2tp_nl_cmd_ops[pw_type] = NULL;
811 genl_unlock();
812 }
813}
814EXPORT_SYMBOL_GPL(l2tp_nl_unregister_ops);
815
816static int l2tp_nl_init(void)
817{
818 int err;
819
820 printk(KERN_INFO "L2TP netlink interface\n");
821 err = genl_register_family_with_ops(&l2tp_nl_family, l2tp_nl_ops,
822 ARRAY_SIZE(l2tp_nl_ops));
823
824 return err;
825}
826
827static void l2tp_nl_cleanup(void)
828{
829 genl_unregister_family(&l2tp_nl_family);
830}
831
832module_init(l2tp_nl_init);
833module_exit(l2tp_nl_cleanup);
834
835MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
836MODULE_DESCRIPTION("L2TP netlink");
837MODULE_LICENSE("GPL");
838MODULE_VERSION("1.0");
839MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \
840 __stringify(NETLINK_GENERIC) "-type-" "l2tp");
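For context, here is a minimal userspace sketch of driving this generic
netlink family. It is illustrative only: the family id is assumed to have
already been resolved from L2TP_GENL_NAME ("l2tp") via the nlctrl
CTRL_CMD_GETFAMILY exchange and passed in as l2tp_fam, nlsk is assumed to
have been opened with socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC), and
error handling plus reply/ACK parsing are omitted.

#include <string.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/l2tp.h>

static int l2tp_tunnel_get(int nlsk, __u16 l2tp_fam, __u32 conn_id)
{
	struct {
		struct nlmsghdr nh;
		struct genlmsghdr gh;
		char attrs[8];
	} req;
	struct nlattr *nla = (struct nlattr *)req.attrs;
	struct sockaddr_nl dst = { .nl_family = AF_NETLINK };

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_type = l2tp_fam;	/* resolved genl family id */
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.gh.cmd = L2TP_CMD_TUNNEL_GET;
	req.gh.version = L2TP_GENL_VERSION;

	/* Single u32 attribute, as required by l2tp_nl_cmd_tunnel_get() */
	nla->nla_type = L2TP_ATTR_CONN_ID;
	nla->nla_len = NLA_HDRLEN + sizeof(__u32);
	memcpy((char *)nla + NLA_HDRLEN, &conn_id, sizeof(conn_id));

	req.nh.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN + NLA_ALIGN(nla->nla_len));

	return sendto(nlsk, &req, req.nh.nlmsg_len, 0,
		      (struct sockaddr *)&dst, sizeof(dst));
}

The same framing, carrying further attributes from l2tp_nl_policy (peer
conn id, protocol version, encap type, fd), would drive
L2TP_CMD_TUNNEL_CREATE.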
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
new file mode 100644
index 000000000000..90d82b3f2889
--- /dev/null
+++ b/net/l2tp/l2tp_ppp.c
@@ -0,0 +1,1837 @@
1/*****************************************************************************
2 * Linux PPP over L2TP (PPPoX/PPPoL2TP) Sockets
3 *
4 * PPPoX --- Generic PPP encapsulation socket family
5 * PPPoL2TP --- PPP over L2TP (RFC 2661)
6 *
7 * Version: 2.0.0
8 *
9 * Authors: James Chapman (jchapman@katalix.com)
10 *
11 * Based on original work by Martijn van Oosterhout <kleptog@svana.org>
12 *
13 * License:
14 * This program is free software; you can redistribute it and/or
15 * modify it under the terms of the GNU General Public License
16 * as published by the Free Software Foundation; either version
17 * 2 of the License, or (at your option) any later version.
18 *
19 */
20
21/* This driver handles only L2TP data frames; control frames are handled by a
22 * userspace application.
23 *
24 * To send data in an L2TP session, userspace opens a PPPoL2TP socket and
25 * attaches it to a bound UDP socket with local tunnel_id / session_id and
26 * peer tunnel_id / session_id set. Data can then be sent or received using
27 * regular socket sendmsg() / recvmsg() calls. Kernel parameters of the socket
28 * can be read or modified using ioctl() or [gs]etsockopt() calls.
29 *
30 * When a PPPoL2TP socket is connected with local and peer session_id values
31 * zero, the socket is treated as a special tunnel management socket.
32 *
33 * Here's example userspace code to create a socket for sending/receiving data
34 * over an L2TP session:-
35 *
36 * struct sockaddr_pppol2tp sax;
37 * int fd;
38 * int session_fd;
39 *
40 * fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
41 *
42 * sax.sa_family = AF_PPPOX;
43 * sax.sa_protocol = PX_PROTO_OL2TP;
44 * sax.pppol2tp.fd = tunnel_fd; // bound UDP socket
45 * sax.pppol2tp.addr.sin_addr.s_addr = addr->sin_addr.s_addr;
46 * sax.pppol2tp.addr.sin_port = addr->sin_port;
47 * sax.pppol2tp.addr.sin_family = AF_INET;
48 * sax.pppol2tp.s_tunnel = tunnel_id;
49 * sax.pppol2tp.s_session = session_id;
50 * sax.pppol2tp.d_tunnel = peer_tunnel_id;
51 * sax.pppol2tp.d_session = peer_session_id;
52 *
53 * session_fd = connect(fd, (struct sockaddr *)&sax, sizeof(sax));
54 *
55 * A pppd plugin that allows PPP traffic to be carried over L2TP using
56 * this driver is available from the OpenL2TP project at
57 * http://openl2tp.sourceforge.net.
58 */
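/* A fuller, self-contained version of the example above (hypothetical
 * values throughout; error checks omitted). It also creates the tunnel
 * UDP socket that pppol2tp.fd must refer to. Needs <netinet/in.h>,
 * <arpa/inet.h>, <sys/socket.h> and <linux/if_pppox.h>.
 *
 *	struct sockaddr_in local = { .sin_family = AF_INET,
 *				     .sin_port = htons(1701) };
 *	struct sockaddr_in peer = { .sin_family = AF_INET,
 *				    .sin_port = htons(1701) };
 *	struct sockaddr_pppol2tp sax;
 *	int tunnel_fd, fd;
 *
 *	inet_pton(AF_INET, "192.0.2.1", &peer.sin_addr);
 *
 *	tunnel_fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
 *	bind(tunnel_fd, (struct sockaddr *)&local, sizeof(local));
 *	connect(tunnel_fd, (struct sockaddr *)&peer, sizeof(peer));
 *
 *	fd = socket(AF_PPPOX, SOCK_DGRAM, PX_PROTO_OL2TP);
 *	memset(&sax, 0, sizeof(sax));
 *	sax.sa_family = AF_PPPOX;
 *	sax.sa_protocol = PX_PROTO_OL2TP;
 *	sax.pppol2tp.fd = tunnel_fd;
 *	sax.pppol2tp.addr = peer;	// where encapsulated frames go
 *	sax.pppol2tp.s_tunnel = 1;	// local tunnel id
 *	sax.pppol2tp.d_tunnel = 2;	// peer tunnel id
 *	sax.pppol2tp.s_session = 1;	// local session id
 *	sax.pppol2tp.d_session = 2;	// peer session id
 *	connect(fd, (struct sockaddr *)&sax, sizeof(sax));
 */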
59
60#include <linux/module.h>
61#include <linux/string.h>
62#include <linux/list.h>
63#include <linux/uaccess.h>
64
65#include <linux/kernel.h>
66#include <linux/spinlock.h>
67#include <linux/kthread.h>
68#include <linux/sched.h>
69#include <linux/slab.h>
70#include <linux/errno.h>
71#include <linux/jiffies.h>
72
73#include <linux/netdevice.h>
74#include <linux/net.h>
75#include <linux/inetdevice.h>
76#include <linux/skbuff.h>
77#include <linux/init.h>
78#include <linux/ip.h>
79#include <linux/udp.h>
80#include <linux/if_pppox.h>
81#include <linux/if_pppol2tp.h>
82#include <net/sock.h>
83#include <linux/ppp_channel.h>
84#include <linux/ppp_defs.h>
85#include <linux/if_ppp.h>
86#include <linux/file.h>
87#include <linux/hash.h>
88#include <linux/sort.h>
89#include <linux/proc_fs.h>
90#include <linux/l2tp.h>
91#include <linux/nsproxy.h>
92#include <net/net_namespace.h>
93#include <net/netns/generic.h>
94#include <net/dst.h>
95#include <net/ip.h>
96#include <net/udp.h>
97#include <net/xfrm.h>
98
99#include <asm/byteorder.h>
100#include <asm/atomic.h>
101
102#include "l2tp_core.h"
103
104#define PPPOL2TP_DRV_VERSION "V2.0"
105
106/* Space for UDP, L2TP and PPP headers */
107#define PPPOL2TP_HEADER_OVERHEAD 40
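/* Worked breakdown, assuming IPv4/UDP encapsulation: 20 (IP) + 8 (UDP) +
 * 10 (L2TPv2 header with sequence numbers) + 2 (PPP address/control) = 40.
 */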
108
109#define PRINTK(_mask, _type, _lvl, _fmt, args...) \
110 do { \
111 if ((_mask) & (_type)) \
112 printk(_lvl "PPPOL2TP: " _fmt, ##args); \
113 } while (0)
114
115/* Number of bytes to build transmit L2TP headers.
116 * Unfortunately the size is different depending on whether sequence numbers
117 * are enabled.
118 */
119#define PPPOL2TP_L2TP_HDR_SIZE_SEQ 10
120#define PPPOL2TP_L2TP_HDR_SIZE_NOSEQ 6
121
122/* Private data of each session. This data lives at the end of struct
123 * l2tp_session, referenced via session->priv[].
124 */
125struct pppol2tp_session {
126 int owner; /* pid that opened the socket */
127
128 struct sock *sock; /* Pointer to the session
129 * PPPoX socket */
130 struct sock *tunnel_sock; /* Pointer to the tunnel UDP
131 * socket */
132 int flags; /* accessed by PPPIOCGFLAGS.
133 * Unused. */
134};
135
136static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb);
137
138static struct ppp_channel_ops pppol2tp_chan_ops = { .start_xmit = pppol2tp_xmit };
139static const struct proto_ops pppol2tp_ops;
140
141/* Helpers to obtain tunnel/session contexts from sockets.
142 */
143static inline struct l2tp_session *pppol2tp_sock_to_session(struct sock *sk)
144{
145 struct l2tp_session *session;
146
147 if (sk == NULL)
148 return NULL;
149
150 sock_hold(sk);
151 session = (struct l2tp_session *)(sk->sk_user_data);
152 if (session == NULL) {
153 sock_put(sk);
154 goto out;
155 }
156
157 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
158
159out:
160 return session;
161}
162
163/*****************************************************************************
164 * Receive data handling
165 *****************************************************************************/
166
167static int pppol2tp_recv_payload_hook(struct sk_buff *skb)
168{
169 /* Skip PPP header, if present. In testing, Microsoft L2TP clients
170 * don't send the PPP header (PPP header compression enabled), but
171 * other clients can include the header. So we cope with both cases
172 * here. The PPP header is always FF03 when using L2TP.
173 *
174 * Note that skb->data[] isn't dereferenced from a u16 ptr here since
175 * the field may be unaligned.
176 */
177 if (!pskb_may_pull(skb, 2))
178 return 1;
179
180 if ((skb->data[0] == 0xff) && (skb->data[1] == 0x03))
181 skb_pull(skb, 2);
182
183 return 0;
184}
185
186/* Receive message. This is the recvmsg for the PPPoL2TP socket.
187 */
188static int pppol2tp_recvmsg(struct kiocb *iocb, struct socket *sock,
189 struct msghdr *msg, size_t len,
190 int flags)
191{
192 int err;
193 struct sk_buff *skb;
194 struct sock *sk = sock->sk;
195
196 err = -EIO;
197 if (sk->sk_state & PPPOX_BOUND)
198 goto end;
199
200 msg->msg_namelen = 0;
201
202 err = 0;
203 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
204 flags & MSG_DONTWAIT, &err);
205 if (!skb)
206 goto end;
207
208 if (len > skb->len)
209 len = skb->len;
210 else if (len < skb->len)
211 msg->msg_flags |= MSG_TRUNC;
212
213 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len);
214 if (likely(err == 0))
215 err = len;
216
217 kfree_skb(skb);
218end:
219 return err;
220}
221
222static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len)
223{
224 struct pppol2tp_session *ps = l2tp_session_priv(session);
225 struct sock *sk = NULL;
226
227 /* If the socket is bound, send it in to PPP's input queue. Otherwise
228 * queue it on the session socket.
229 */
230 sk = ps->sock;
231 if (sk == NULL)
232 goto no_sock;
233
234 if (sk->sk_state & PPPOX_BOUND) {
235 struct pppox_sock *po;
236 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_DEBUG,
237 "%s: recv %d byte data frame, passing to ppp\n",
238 session->name, data_len);
239
240 /* We need to forget all info related to the L2TP packet
241 * gathered in the skb as we are going to reuse the same
242 * skb for the inner packet.
243 * Namely we need to:
244 * - reset xfrm (IPSec) information as it applies to
245 * the outer L2TP packet and not to the inner one
246 * - release the dst to force a route lookup on the inner
247 * IP packet since skb->dst currently points to the dst
248 * of the UDP tunnel
249 * - reset netfilter information as it doesn't apply
250 * to the inner packet either
251 */
252 secpath_reset(skb);
253 skb_dst_drop(skb);
254 nf_reset(skb);
255
256 po = pppox_sk(sk);
257 ppp_input(&po->chan, skb);
258 } else {
259 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
260 "%s: socket not bound\n", session->name);
261
262 /* Not bound. Nothing we can do, so discard. */
263 session->stats.rx_errors++;
264 kfree_skb(skb);
265 }
266
267 return;
268
269no_sock:
270 PRINTK(session->debug, PPPOL2TP_MSG_DATA, KERN_INFO,
271 "%s: no socket\n", session->name);
272 kfree_skb(skb);
273}
274
275static void pppol2tp_session_sock_hold(struct l2tp_session *session)
276{
277 struct pppol2tp_session *ps = l2tp_session_priv(session);
278
279 if (ps->sock)
280 sock_hold(ps->sock);
281}
282
283static void pppol2tp_session_sock_put(struct l2tp_session *session)
284{
285 struct pppol2tp_session *ps = l2tp_session_priv(session);
286
287 if (ps->sock)
288 sock_put(ps->sock);
289}
290
291/************************************************************************
292 * Transmit handling
293 ***********************************************************************/
294
295/* This is the sendmsg for the PPPoL2TP session socket. We come here
296 * when a user application does a sendmsg() on the session socket. L2TP and
297 * PPP headers must be inserted into the user's data.
298 */
299static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
300 size_t total_len)
301{
302 static const unsigned char ppph[2] = { 0xff, 0x03 };
303 struct sock *sk = sock->sk;
304 struct sk_buff *skb;
305 int error;
306 struct l2tp_session *session;
307 struct l2tp_tunnel *tunnel;
308 struct pppol2tp_session *ps;
309 int uhlen;
310
311 error = -ENOTCONN;
312 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
313 goto error;
314
315 /* Get session and tunnel contexts */
316 error = -EBADF;
317 session = pppol2tp_sock_to_session(sk);
318 if (session == NULL)
319 goto error;
320
321 ps = l2tp_session_priv(session);
322 tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
323 if (tunnel == NULL)
324 goto error_put_sess;
325
326 uhlen = (tunnel->encap == L2TP_ENCAPTYPE_UDP) ? sizeof(struct udphdr) : 0;
327
328 /* Allocate a socket buffer */
329 error = -ENOMEM;
330 skb = sock_wmalloc(sk, NET_SKB_PAD + sizeof(struct iphdr) +
331 uhlen + session->hdr_len +
332 sizeof(ppph) + total_len,
333 0, GFP_KERNEL);
334 if (!skb)
335 goto error_put_sess_tun;
336
337 /* Reserve space for headers. */
338 skb_reserve(skb, NET_SKB_PAD);
339 skb_reset_network_header(skb);
340 skb_reserve(skb, sizeof(struct iphdr));
341 skb_reset_transport_header(skb);
342 skb_reserve(skb, uhlen);
343
344 /* Add PPP header */
345 skb->data[0] = ppph[0];
346 skb->data[1] = ppph[1];
347 skb_put(skb, 2);
348
349 /* Copy user data into skb */
350 error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
351 if (error < 0) {
352 kfree_skb(skb);
353 goto error_put_sess_tun;
354 }
355 skb_put(skb, total_len);
356
357 l2tp_xmit_skb(session, skb, session->hdr_len);
358
359 sock_put(ps->tunnel_sock);
360
361 return total_len;
362
363error_put_sess_tun:
364 sock_put(ps->tunnel_sock);
365error_put_sess:
366 sock_put(sk);
367error:
368 return error;
369}
370
371/* Transmit function called by generic PPP driver. Sends PPP frame
372 * over PPPoL2TP socket.
373 *
374 * This is almost the same as pppol2tp_sendmsg(), but rather than
375 * being called with a msghdr from userspace, it is called with a skb
376 * from the kernel.
377 *
378 * The supplied skb from ppp doesn't have enough headroom for the
379 * insertion of L2TP, UDP and IP headers so we need to allocate more
380 * headroom in the skb. This will create a cloned skb. But we must be
381 * careful in the error case because the caller will expect to free
382 * the skb it supplied, not our cloned skb. So we take care to always
383 * leave the original skb unfreed if we return an error.
384 */
385static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
386{
387 static const u8 ppph[2] = { 0xff, 0x03 };
388 struct sock *sk = (struct sock *) chan->private;
389 struct sock *sk_tun;
390 struct l2tp_session *session;
391 struct l2tp_tunnel *tunnel;
392 struct pppol2tp_session *ps;
393 int old_headroom;
394 int new_headroom;
395
396 if (sock_flag(sk, SOCK_DEAD) || !(sk->sk_state & PPPOX_CONNECTED))
397 goto abort;
398
399 /* Get session and tunnel contexts from the socket */
400 session = pppol2tp_sock_to_session(sk);
401 if (session == NULL)
402 goto abort;
403
404 ps = l2tp_session_priv(session);
405 sk_tun = ps->tunnel_sock;
406 if (sk_tun == NULL)
407 goto abort_put_sess;
408 tunnel = l2tp_sock_to_tunnel(sk_tun);
409 if (tunnel == NULL)
410 goto abort_put_sess;
411
412 old_headroom = skb_headroom(skb);
413 if (skb_cow_head(skb, sizeof(ppph)))
414 goto abort_put_sess_tun;
415
416 new_headroom = skb_headroom(skb);
417 skb->truesize += new_headroom - old_headroom;
418
419 /* Setup PPP header */
420 __skb_push(skb, sizeof(ppph));
421 skb->data[0] = ppph[0];
422 skb->data[1] = ppph[1];
423
424 l2tp_xmit_skb(session, skb, session->hdr_len);
425
426 sock_put(sk_tun);
427 sock_put(sk);
428 return 1;
429
430abort_put_sess_tun:
431 sock_put(sk_tun);
432abort_put_sess:
433 sock_put(sk);
434abort:
435 /* Free the original skb */
436 kfree_skb(skb);
437 return 1;
438}
439
440/*****************************************************************************
441 * Session (and tunnel control) socket create/destroy.
442 *****************************************************************************/
443
444/* Called by l2tp_core when a session socket is being closed.
445 */
446static void pppol2tp_session_close(struct l2tp_session *session)
447{
448 struct pppol2tp_session *ps = l2tp_session_priv(session);
449 struct sock *sk = ps->sock;
450 struct sk_buff *skb;
451
452 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
453
454 if (session->session_id == 0)
455 goto out;
456
457 if (sk != NULL) {
458 lock_sock(sk);
459
460 if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
461 pppox_unbind_sock(sk);
462 sk->sk_state = PPPOX_DEAD;
463 sk->sk_state_change(sk);
464 }
465
466 /* Purge any queued data */
467 skb_queue_purge(&sk->sk_receive_queue);
468 skb_queue_purge(&sk->sk_write_queue);
469 while ((skb = skb_dequeue(&session->reorder_q))) {
470 kfree_skb(skb);
471 sock_put(sk);
472 }
473
474 release_sock(sk);
475 }
476
477out:
478 return;
479}
480
481/* Really kill the session socket. (Called from sock_put() if
482 * refcnt == 0.)
483 */
484static void pppol2tp_session_destruct(struct sock *sk)
485{
486 struct l2tp_session *session;
487
488 if (sk->sk_user_data != NULL) {
489 session = sk->sk_user_data;
490 if (session == NULL)
491 goto out;
492
493 sk->sk_user_data = NULL;
494 BUG_ON(session->magic != L2TP_SESSION_MAGIC);
495 l2tp_session_dec_refcount(session);
496 }
497
498out:
499 return;
500}
501
502/* Called when the PPPoX socket (session) is closed.
503 */
504static int pppol2tp_release(struct socket *sock)
505{
506 struct sock *sk = sock->sk;
507 struct l2tp_session *session;
508 int error;
509
510 if (!sk)
511 return 0;
512
513 error = -EBADF;
514 lock_sock(sk);
515 if (sock_flag(sk, SOCK_DEAD) != 0)
516 goto error;
517
518 pppox_unbind_sock(sk);
519
520 /* Signal the death of the socket. */
521 sk->sk_state = PPPOX_DEAD;
522 sock_orphan(sk);
523 sock->sk = NULL;
524
525 session = pppol2tp_sock_to_session(sk);
526
527 /* Purge any queued data */
528 skb_queue_purge(&sk->sk_receive_queue);
529 skb_queue_purge(&sk->sk_write_queue);
530 if (session != NULL) {
531 struct sk_buff *skb;
532 while ((skb = skb_dequeue(&session->reorder_q))) {
533 kfree_skb(skb);
534 sock_put(sk);
535 }
536 sock_put(sk);
537 }
538
539 release_sock(sk);
540
541 /* This will delete the session context via
542 * pppol2tp_session_destruct() if the socket's refcnt drops to
543 * zero.
544 */
545 sock_put(sk);
546
547 return 0;
548
549error:
550 release_sock(sk);
551 return error;
552}
553
554static struct proto pppol2tp_sk_proto = {
555 .name = "PPPOL2TP",
556 .owner = THIS_MODULE,
557 .obj_size = sizeof(struct pppox_sock),
558};
559
560static int pppol2tp_backlog_recv(struct sock *sk, struct sk_buff *skb)
561{
562 int rc;
563
564 rc = l2tp_udp_encap_recv(sk, skb);
565 if (rc)
566 kfree_skb(skb);
567
568 return NET_RX_SUCCESS;
569}
570
571/* socket() handler. Initialize a new struct sock.
572 */
573static int pppol2tp_create(struct net *net, struct socket *sock)
574{
575 int error = -ENOMEM;
576 struct sock *sk;
577
578 sk = sk_alloc(net, PF_PPPOX, GFP_KERNEL, &pppol2tp_sk_proto);
579 if (!sk)
580 goto out;
581
582 sock_init_data(sock, sk);
583
584 sock->state = SS_UNCONNECTED;
585 sock->ops = &pppol2tp_ops;
586
587 sk->sk_backlog_rcv = pppol2tp_backlog_recv;
588 sk->sk_protocol = PX_PROTO_OL2TP;
589 sk->sk_family = PF_PPPOX;
590 sk->sk_state = PPPOX_NONE;
591 sk->sk_type = SOCK_STREAM;
592 sk->sk_destruct = pppol2tp_session_destruct;
593
594 error = 0;
595
596out:
597 return error;
598}
599
600#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
601static void pppol2tp_show(struct seq_file *m, void *arg)
602{
603 struct l2tp_session *session = arg;
604 struct pppol2tp_session *ps = l2tp_session_priv(session);
605
606 if (ps) {
607 struct pppox_sock *po = pppox_sk(ps->sock);
608 if (po)
609 seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
610 }
611}
612#endif
613
614/* connect() handler. Attach a PPPoX socket to a tunnel UDP socket
615 */
616static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
617 int sockaddr_len, int flags)
618{
619 struct sock *sk = sock->sk;
620 struct sockaddr_pppol2tp *sp = (struct sockaddr_pppol2tp *) uservaddr;
621 struct sockaddr_pppol2tpv3 *sp3 = (struct sockaddr_pppol2tpv3 *) uservaddr;
622 struct pppox_sock *po = pppox_sk(sk);
623 struct l2tp_session *session = NULL;
624 struct l2tp_tunnel *tunnel;
625 struct pppol2tp_session *ps;
626 struct dst_entry *dst;
627 struct l2tp_session_cfg cfg = { 0, };
628 int error = 0;
629 u32 tunnel_id, peer_tunnel_id;
630 u32 session_id, peer_session_id;
631 int ver = 2;
632 int fd;
633
634 lock_sock(sk);
635
636 error = -EINVAL;
637 if (sp->sa_protocol != PX_PROTO_OL2TP)
638 goto end;
639
640 /* Check for already bound sockets */
641 error = -EBUSY;
642 if (sk->sk_state & PPPOX_CONNECTED)
643 goto end;
644
645 /* We don't support rebinding anyway */
646 error = -EALREADY;
647 if (sk->sk_user_data)
648 goto end; /* socket is already attached */
649
650 /* Get params from socket address. Handle L2TPv2 and L2TPv3 */
651 if (sockaddr_len == sizeof(struct sockaddr_pppol2tp)) {
652 fd = sp->pppol2tp.fd;
653 tunnel_id = sp->pppol2tp.s_tunnel;
654 peer_tunnel_id = sp->pppol2tp.d_tunnel;
655 session_id = sp->pppol2tp.s_session;
656 peer_session_id = sp->pppol2tp.d_session;
657 } else if (sockaddr_len == sizeof(struct sockaddr_pppol2tpv3)) {
658 ver = 3;
659 fd = sp3->pppol2tp.fd;
660 tunnel_id = sp3->pppol2tp.s_tunnel;
661 peer_tunnel_id = sp3->pppol2tp.d_tunnel;
662 session_id = sp3->pppol2tp.s_session;
663 peer_session_id = sp3->pppol2tp.d_session;
664 } else {
665 error = -EINVAL;
666 goto end; /* bad socket address */
667 }
668
669 /* Don't bind if tunnel_id is 0 */
670 error = -EINVAL;
671 if (tunnel_id == 0)
672 goto end;
673
674 tunnel = l2tp_tunnel_find(sock_net(sk), tunnel_id);
675
676 /* Special case: create tunnel context if session_id and
677 * peer_session_id are 0. Otherwise look up the tunnel using the supplied
678 * tunnel id.
679 */
680 if ((session_id == 0) && (peer_session_id == 0)) {
681 if (tunnel == NULL) {
682 struct l2tp_tunnel_cfg tcfg = {
683 .encap = L2TP_ENCAPTYPE_UDP,
684 .debug = 0,
685 };
686 error = l2tp_tunnel_create(sock_net(sk), fd, ver, tunnel_id, peer_tunnel_id, &tcfg, &tunnel);
687 if (error < 0)
688 goto end;
689 }
690 } else {
691 /* Error if we can't find the tunnel */
692 error = -ENOENT;
693 if (tunnel == NULL)
694 goto end;
695
696 /* Error if socket is not prepped */
697 if (tunnel->sock == NULL)
698 goto end;
699 }
700
701 if (tunnel->recv_payload_hook == NULL)
702 tunnel->recv_payload_hook = pppol2tp_recv_payload_hook;
703
704 if (tunnel->peer_tunnel_id == 0) {
705 if (ver == 2)
706 tunnel->peer_tunnel_id = sp->pppol2tp.d_tunnel;
707 else
708 tunnel->peer_tunnel_id = sp3->pppol2tp.d_tunnel;
709 }
710
711 /* Create session if it doesn't already exist. We handle the
712 * case where a session was previously created by the netlink
713 * interface by checking that the session doesn't already have
714 * a socket and that its tunnel socket is the one we expect. If any
715 * of those checks fail, return EEXIST to the caller.
716 */
717 session = l2tp_session_find(sock_net(sk), tunnel, session_id);
718 if (session == NULL) {
719 /* Default MTU must allow space for UDP/L2TP/PPP
720 * headers.
721 */
722 cfg.mtu = cfg.mru = 1500 - PPPOL2TP_HEADER_OVERHEAD;
723
724 /* Allocate and initialize a new session context. */
725 session = l2tp_session_create(sizeof(struct pppol2tp_session),
726 tunnel, session_id,
727 peer_session_id, &cfg);
728 if (session == NULL) {
729 error = -ENOMEM;
730 goto end;
731 }
732 } else {
733 ps = l2tp_session_priv(session);
734 error = -EEXIST;
735 if (ps->sock != NULL)
736 goto end;
737
738 /* consistency checks */
739 if (ps->tunnel_sock != tunnel->sock)
740 goto end;
741 }
742
743 /* Associate session with its PPPoL2TP socket */
744 ps = l2tp_session_priv(session);
745 ps->owner = current->pid;
746 ps->sock = sk;
747 ps->tunnel_sock = tunnel->sock;
748
749 session->recv_skb = pppol2tp_recv;
750 session->session_close = pppol2tp_session_close;
751#if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE)
752 session->show = pppol2tp_show;
753#endif
754
755 /* We need to know each time a skb is dropped from the reorder
756 * queue.
757 */
758 session->ref = pppol2tp_session_sock_hold;
759 session->deref = pppol2tp_session_sock_put;
760
761 /* If PMTU discovery was enabled, use the MTU that was discovered */
762 dst = sk_dst_get(sk);
763 if (dst != NULL) {
764 u32 pmtu = dst_mtu(__sk_dst_get(sk));
765 if (pmtu != 0)
766 session->mtu = session->mru = pmtu -
767 PPPOL2TP_HEADER_OVERHEAD;
768 dst_release(dst);
769 }
770
771 /* Special case: if source & dest session_id == 0x0000, this
772 * socket is being created to manage the tunnel. Just set up
773 * the internal context for use by ioctl() and sockopt()
774 * handlers.
775 */
776 if ((session->session_id == 0) &&
777 (session->peer_session_id == 0)) {
778 error = 0;
779 goto out_no_ppp;
780 }
781
782 /* The only header we need to worry about is the L2TP
783 * header. This size is different depending on whether
784 * sequence numbers are enabled for the data channel.
785 */
786 po->chan.hdrlen = PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
787
788 po->chan.private = sk;
789 po->chan.ops = &pppol2tp_chan_ops;
790 po->chan.mtu = session->mtu;
791
792 error = ppp_register_net_channel(sock_net(sk), &po->chan);
793 if (error)
794 goto end;
795
796out_no_ppp:
797 /* This is how we get the session context from the socket. */
798 sk->sk_user_data = session;
799 sk->sk_state = PPPOX_CONNECTED;
800 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
801 "%s: created\n", session->name);
802
803end:
804 release_sock(sk);
805
806 return error;
807}
808
809#ifdef CONFIG_L2TP_V3
810
811/* Called when creating sessions via the netlink interface.
812 */
813static int pppol2tp_session_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
814{
815 int error;
816 struct l2tp_tunnel *tunnel;
817 struct l2tp_session *session;
818 struct pppol2tp_session *ps;
819
820 tunnel = l2tp_tunnel_find(net, tunnel_id);
821
822 /* Error if we can't find the tunnel */
823 error = -ENOENT;
824 if (tunnel == NULL)
825 goto out;
826
827 /* Error if tunnel socket is not prepped */
828 if (tunnel->sock == NULL)
829 goto out;
830
831 /* Check that this session doesn't already exist */
832 error = -EEXIST;
833 session = l2tp_session_find(net, tunnel, session_id);
834 if (session != NULL)
835 goto out;
836
837 /* Default MTU values. */
838 if (cfg->mtu == 0)
839 cfg->mtu = 1500 - PPPOL2TP_HEADER_OVERHEAD;
840 if (cfg->mru == 0)
841 cfg->mru = cfg->mtu;
842
843 /* Allocate and initialize a new session context. */
844 error = -ENOMEM;
845 session = l2tp_session_create(sizeof(struct pppol2tp_session),
846 tunnel, session_id,
847 peer_session_id, cfg);
848 if (session == NULL)
849 goto out;
850
851 ps = l2tp_session_priv(session);
852 ps->tunnel_sock = tunnel->sock;
853
854 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
855 "%s: created\n", session->name);
856
857 error = 0;
858
859out:
860 return error;
861}
862
863/* Called when deleting sessions via the netlink interface.
864 */
865static int pppol2tp_session_delete(struct l2tp_session *session)
866{
867 struct pppol2tp_session *ps = l2tp_session_priv(session);
868
869 if (ps->sock == NULL)
870 l2tp_session_dec_refcount(session);
871
872 return 0;
873}
874
875#endif /* CONFIG_L2TP_V3 */
876
877/* getname() support.
878 */
879static int pppol2tp_getname(struct socket *sock, struct sockaddr *uaddr,
880 int *usockaddr_len, int peer)
881{
882 int len = 0;
883 int error = 0;
884 struct l2tp_session *session;
885 struct l2tp_tunnel *tunnel;
886 struct sock *sk = sock->sk;
887 struct inet_sock *inet;
888 struct pppol2tp_session *pls;
889
890 error = -ENOTCONN;
891 if (sk == NULL)
892 goto end;
893 if (sk->sk_state != PPPOX_CONNECTED)
894 goto end;
895
896 error = -EBADF;
897 session = pppol2tp_sock_to_session(sk);
898 if (session == NULL)
899 goto end;
900
901 pls = l2tp_session_priv(session);
902 tunnel = l2tp_sock_to_tunnel(pls->tunnel_sock);
903 if (tunnel == NULL) {
904 error = -EBADF;
905 goto end_put_sess;
906 }
907
908 inet = inet_sk(sk);
909 if (tunnel->version == 2) {
910 struct sockaddr_pppol2tp sp;
911 len = sizeof(sp);
912 memset(&sp, 0, len);
913 sp.sa_family = AF_PPPOX;
914 sp.sa_protocol = PX_PROTO_OL2TP;
915 sp.pppol2tp.fd = tunnel->fd;
916 sp.pppol2tp.pid = pls->owner;
917 sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
918 sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
919 sp.pppol2tp.s_session = session->session_id;
920 sp.pppol2tp.d_session = session->peer_session_id;
921 sp.pppol2tp.addr.sin_family = AF_INET;
922 sp.pppol2tp.addr.sin_port = inet->inet_dport;
923 sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
924 memcpy(uaddr, &sp, len);
925 } else if (tunnel->version == 3) {
926 struct sockaddr_pppol2tpv3 sp;
927 len = sizeof(sp);
928 memset(&sp, 0, len);
929 sp.sa_family = AF_PPPOX;
930 sp.sa_protocol = PX_PROTO_OL2TP;
931 sp.pppol2tp.fd = tunnel->fd;
932 sp.pppol2tp.pid = pls->owner;
933 sp.pppol2tp.s_tunnel = tunnel->tunnel_id;
934 sp.pppol2tp.d_tunnel = tunnel->peer_tunnel_id;
935 sp.pppol2tp.s_session = session->session_id;
936 sp.pppol2tp.d_session = session->peer_session_id;
937 sp.pppol2tp.addr.sin_family = AF_INET;
938 sp.pppol2tp.addr.sin_port = inet->inet_dport;
939 sp.pppol2tp.addr.sin_addr.s_addr = inet->inet_daddr;
940 memcpy(uaddr, &sp, len);
941 }
942
943 *usockaddr_len = len;
944
945 sock_put(pls->tunnel_sock);
946end_put_sess:
947 sock_put(sk);
948 error = 0;
949
950end:
951 return error;
952}
953
954/****************************************************************************
955 * ioctl() handlers.
956 *
957 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
958 * sockets. However, in order to control kernel tunnel features, we allow
959 * userspace to create a special "tunnel" PPPoX socket which is used for
960 * control only. Tunnel PPPoX sockets have session_id == 0 and simply allow
961 * the user application to issue L2TP setsockopt(), getsockopt() and ioctl()
962 * calls.
963 ****************************************************************************/
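/* Example (illustrative, not kernel code): fetching tunnel statistics from
 * userspace through a tunnel management socket (s_session == d_session == 0),
 * using the PPPIOCGL2TPSTATS handling below. tunfd is assumed to be such a
 * socket; needs <sys/ioctl.h>, <linux/if_ppp.h> and <linux/if_pppol2tp.h>.
 *
 *	struct pppol2tp_ioc_stats stats;
 *
 *	memset(&stats, 0, sizeof(stats));
 *	stats.session_id = 0;	// 0 = tunnel stats; nonzero redirects to
 *				// the matching session, as described below
 *	if (ioctl(tunfd, PPPIOCGL2TPSTATS, &stats) == 0)
 *		printf("tx %llu rx %llu\n",
 *		       (unsigned long long) stats.tx_packets,
 *		       (unsigned long long) stats.rx_packets);
 */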
964
965static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,
966 struct l2tp_stats *stats)
967{
968 dest->tx_packets = stats->tx_packets;
969 dest->tx_bytes = stats->tx_bytes;
970 dest->tx_errors = stats->tx_errors;
971 dest->rx_packets = stats->rx_packets;
972 dest->rx_bytes = stats->rx_bytes;
973 dest->rx_seq_discards = stats->rx_seq_discards;
974 dest->rx_oos_packets = stats->rx_oos_packets;
975 dest->rx_errors = stats->rx_errors;
976}
977
978/* Session ioctl helper.
979 */
980static int pppol2tp_session_ioctl(struct l2tp_session *session,
981 unsigned int cmd, unsigned long arg)
982{
983 struct ifreq ifr;
984 int err = 0;
985 struct sock *sk;
986 int val = (int) arg;
987 struct pppol2tp_session *ps = l2tp_session_priv(session);
988 struct l2tp_tunnel *tunnel = session->tunnel;
989 struct pppol2tp_ioc_stats stats;
990
991 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
992 "%s: pppol2tp_session_ioctl(cmd=%#x, arg=%#lx)\n",
993 session->name, cmd, arg);
994
995 sk = ps->sock;
996 sock_hold(sk);
997
998 switch (cmd) {
999 case SIOCGIFMTU:
1000 err = -ENXIO;
1001 if (!(sk->sk_state & PPPOX_CONNECTED))
1002 break;
1003
1004 err = -EFAULT;
1005 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1006 break;
1007 ifr.ifr_mtu = session->mtu;
1008 if (copy_to_user((void __user *) arg, &ifr, sizeof(struct ifreq)))
1009 break;
1010
1011 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1012 "%s: get mtu=%d\n", session->name, session->mtu);
1013 err = 0;
1014 break;
1015
1016 case SIOCSIFMTU:
1017 err = -ENXIO;
1018 if (!(sk->sk_state & PPPOX_CONNECTED))
1019 break;
1020
1021 err = -EFAULT;
1022 if (copy_from_user(&ifr, (void __user *) arg, sizeof(struct ifreq)))
1023 break;
1024
1025 session->mtu = ifr.ifr_mtu;
1026
1027 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1028 "%s: set mtu=%d\n", session->name, session->mtu);
1029 err = 0;
1030 break;
1031
1032 case PPPIOCGMRU:
1033 err = -ENXIO;
1034 if (!(sk->sk_state & PPPOX_CONNECTED))
1035 break;
1036
1037 err = -EFAULT;
1038 if (put_user(session->mru, (int __user *) arg))
1039 break;
1040
1041 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1042 "%s: get mru=%d\n", session->name, session->mru);
1043 err = 0;
1044 break;
1045
1046 case PPPIOCSMRU:
1047 err = -ENXIO;
1048 if (!(sk->sk_state & PPPOX_CONNECTED))
1049 break;
1050
1051 err = -EFAULT;
1052 if (get_user(val, (int __user *) arg))
1053 break;
1054
1055 session->mru = val;
1056 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1057 "%s: set mru=%d\n", session->name, session->mru);
1058 err = 0;
1059 break;
1060
1061 case PPPIOCGFLAGS:
1062 err = -EFAULT;
1063 if (put_user(ps->flags, (int __user *) arg))
1064 break;
1065
1066 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1067 "%s: get flags=%d\n", session->name, ps->flags);
1068 err = 0;
1069 break;
1070
1071 case PPPIOCSFLAGS:
1072 err = -EFAULT;
1073 if (get_user(val, (int __user *) arg))
1074 break;
1075 ps->flags = val;
1076 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1077 "%s: set flags=%d\n", session->name, ps->flags);
1078 err = 0;
1079 break;
1080
1081 case PPPIOCGL2TPSTATS:
1082 err = -ENXIO;
1083 if (!(sk->sk_state & PPPOX_CONNECTED))
1084 break;
1085
1086 memset(&stats, 0, sizeof(stats));
1087 stats.tunnel_id = tunnel->tunnel_id;
1088 stats.session_id = session->session_id;
1089 pppol2tp_copy_stats(&stats, &session->stats);
1090 if (copy_to_user((void __user *) arg, &stats,
1091 sizeof(stats)))
1092 break;
1093 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1094 "%s: get L2TP stats\n", session->name);
1095 err = 0;
1096 break;
1097
1098 default:
1099 err = -ENOSYS;
1100 break;
1101 }
1102
1103 sock_put(sk);
1104
1105 return err;
1106}
1107
1108/* Tunnel ioctl helper.
1109 *
1110 * Note the special handling for PPPIOCGL2TPSTATS below. If the ioctl data
1111 * specifies a session_id, the session ioctl handler is called. This allows an
1112 * application to retrieve session stats via a tunnel socket.
1113 */
1114static int pppol2tp_tunnel_ioctl(struct l2tp_tunnel *tunnel,
1115 unsigned int cmd, unsigned long arg)
1116{
1117 int err = 0;
1118 struct sock *sk;
1119 struct pppol2tp_ioc_stats stats;
1120
1121 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_DEBUG,
1122 "%s: pppol2tp_tunnel_ioctl(cmd=%#x, arg=%#lx)\n",
1123 tunnel->name, cmd, arg);
1124
1125 sk = tunnel->sock;
1126 sock_hold(sk);
1127
1128 switch (cmd) {
1129 case PPPIOCGL2TPSTATS:
1130 err = -ENXIO;
1131 if (!(sk->sk_state & PPPOX_CONNECTED))
1132 break;
1133
1134 if (copy_from_user(&stats, (void __user *) arg,
1135 sizeof(stats))) {
1136 err = -EFAULT;
1137 break;
1138 }
1139 if (stats.session_id != 0) {
1140 /* resend to session ioctl handler */
1141 struct l2tp_session *session =
1142 l2tp_session_find(sock_net(sk), tunnel, stats.session_id);
1143 if (session != NULL)
1144 err = pppol2tp_session_ioctl(session, cmd, arg);
1145 else
1146 err = -EBADR;
1147 break;
1148 }
1149#ifdef CONFIG_XFRM
1150 stats.using_ipsec = (sk->sk_policy[0] || sk->sk_policy[1]) ? 1 : 0;
1151#endif
1152 pppol2tp_copy_stats(&stats, &tunnel->stats);
1153 if (copy_to_user((void __user *) arg, &stats, sizeof(stats))) {
1154 err = -EFAULT;
1155 break;
1156 }
1157 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1158 "%s: get L2TP stats\n", tunnel->name);
1159 err = 0;
1160 break;
1161
1162 default:
1163 err = -ENOSYS;
1164 break;
1165 }
1166
1167 sock_put(sk);
1168
1169 return err;
1170}
1171
1172/* Main ioctl() handler.
1173 * Dispatch to tunnel or session helpers depending on the socket.
1174 */
1175static int pppol2tp_ioctl(struct socket *sock, unsigned int cmd,
1176 unsigned long arg)
1177{
1178 struct sock *sk = sock->sk;
1179 struct l2tp_session *session;
1180 struct l2tp_tunnel *tunnel;
1181 struct pppol2tp_session *ps;
1182 int err;
1183
1184 if (!sk)
1185 return 0;
1186
1187 err = -EBADF;
1188 if (sock_flag(sk, SOCK_DEAD) != 0)
1189 goto end;
1190
1191 err = -ENOTCONN;
1192 if ((sk->sk_user_data == NULL) ||
1193 (!(sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND))))
1194 goto end;
1195
1196 /* Get session context from the socket */
1197 err = -EBADF;
1198 session = pppol2tp_sock_to_session(sk);
1199 if (session == NULL)
1200 goto end;
1201
1202 /* Special case: if session's session_id is zero, treat ioctl as a
1203 * tunnel ioctl
1204 */
1205 ps = l2tp_session_priv(session);
1206 if ((session->session_id == 0) &&
1207 (session->peer_session_id == 0)) {
1208 err = -EBADF;
1209 tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
1210 if (tunnel == NULL)
1211 goto end_put_sess;
1212
1213 err = pppol2tp_tunnel_ioctl(tunnel, cmd, arg);
1214 sock_put(ps->tunnel_sock);
1215 goto end_put_sess;
1216 }
1217
1218 err = pppol2tp_session_ioctl(session, cmd, arg);
1219
1220end_put_sess:
1221 sock_put(sk);
1222end:
1223 return err;
1224}
1225
1226/*****************************************************************************
1227 * setsockopt() / getsockopt() support.
1228 *
1229 * The PPPoX socket is created for L2TP sessions: tunnels have their own UDP
1230 * sockets. In order to control kernel tunnel features, we allow userspace to
1231 * create a special "tunnel" PPPoX socket which is used for control only.
1232 * Tunnel PPPoX sockets have session_id == 0 and simply allow the user
1233 * application to issue L2TP setsockopt(), getsockopt() and ioctl() calls.
1234 *****************************************************************************/
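/* Example (illustrative, not kernel code): toggling per-session options
 * from userspace with the handlers below. sessfd is assumed to be a
 * connected PPPoL2TP session socket; PPPOL2TP_SO_* and SOL_PPPOL2TP come
 * from <linux/if_pppol2tp.h> and <linux/socket.h>.
 *
 *	int one = 1, debug = 0;
 *	socklen_t len = sizeof(debug);
 *
 *	// enable transmit sequence numbers on the data channel
 *	setsockopt(sessfd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ,
 *		   &one, sizeof(one));
 *	// read back the current debug mask
 *	getsockopt(sessfd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG, &debug, &len);
 */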
1235
1236/* Tunnel setsockopt() helper.
1237 */
1238static int pppol2tp_tunnel_setsockopt(struct sock *sk,
1239 struct l2tp_tunnel *tunnel,
1240 int optname, int val)
1241{
1242 int err = 0;
1243
1244 switch (optname) {
1245 case PPPOL2TP_SO_DEBUG:
1246 tunnel->debug = val;
1247 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1248 "%s: set debug=%x\n", tunnel->name, tunnel->debug);
1249 break;
1250
1251 default:
1252 err = -ENOPROTOOPT;
1253 break;
1254 }
1255
1256 return err;
1257}
1258
1259/* Session setsockopt helper.
1260 */
1261static int pppol2tp_session_setsockopt(struct sock *sk,
1262 struct l2tp_session *session,
1263 int optname, int val)
1264{
1265 int err = 0;
1266 struct pppol2tp_session *ps = l2tp_session_priv(session);
1267
1268 switch (optname) {
1269 case PPPOL2TP_SO_RECVSEQ:
1270 if ((val != 0) && (val != 1)) {
1271 err = -EINVAL;
1272 break;
1273 }
1274 session->recv_seq = val ? -1 : 0;
1275 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1276 "%s: set recv_seq=%d\n", session->name, session->recv_seq);
1277 break;
1278
1279 case PPPOL2TP_SO_SENDSEQ:
1280 if ((val != 0) && (val != 1)) {
1281 err = -EINVAL;
1282 break;
1283 }
1284 session->send_seq = val ? -1 : 0;
1285 {
1286 struct sock *ssk = ps->sock;
1287 struct pppox_sock *po = pppox_sk(ssk);
1288 po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
1289 PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
1290 }
1291 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1292 "%s: set send_seq=%d\n", session->name, session->send_seq);
1293 break;
1294
1295 case PPPOL2TP_SO_LNSMODE:
1296 if ((val != 0) && (val != 1)) {
1297 err = -EINVAL;
1298 break;
1299 }
1300 session->lns_mode = val ? -1 : 0;
1301 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1302 "%s: set lns_mode=%d\n", session->name, session->lns_mode);
1303 break;
1304
1305 case PPPOL2TP_SO_DEBUG:
1306 session->debug = val;
1307 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1308 "%s: set debug=%x\n", session->name, session->debug);
1309 break;
1310
1311 case PPPOL2TP_SO_REORDERTO:
1312 session->reorder_timeout = msecs_to_jiffies(val);
1313 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1314 "%s: set reorder_timeout=%d\n", session->name, session->reorder_timeout);
1315 break;
1316
1317 default:
1318 err = -ENOPROTOOPT;
1319 break;
1320 }
1321
1322 return err;
1323}
1324
1325/* Main setsockopt() entry point.
1326 * Does API checks, then calls either the tunnel or session setsockopt
1327 * handler, according to whether the PPPoL2TP socket is for a regular
1328 * session or the special tunnel type.
1329 */
1330static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1331 char __user *optval, unsigned int optlen)
1332{
1333 struct sock *sk = sock->sk;
1334 struct l2tp_session *session;
1335 struct l2tp_tunnel *tunnel;
1336 struct pppol2tp_session *ps;
1337 int val;
1338 int err;
1339
1340 if (level != SOL_PPPOL2TP)
1341 return udp_prot.setsockopt(sk, level, optname, optval, optlen);
1342
1343 if (optlen < sizeof(int))
1344 return -EINVAL;
1345
1346 if (get_user(val, (int __user *)optval))
1347 return -EFAULT;
1348
1349 err = -ENOTCONN;
1350 if (sk->sk_user_data == NULL)
1351 goto end;
1352
1353 /* Get session context from the socket */
1354 err = -EBADF;
1355 session = pppol2tp_sock_to_session(sk);
1356 if (session == NULL)
1357 goto end;
1358
1359 /* Special case: if session_id == 0x0000, treat as operation on tunnel
1360 */
1361 ps = l2tp_session_priv(session);
1362 if ((session->session_id == 0) &&
1363 (session->peer_session_id == 0)) {
1364 err = -EBADF;
1365 tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
1366 if (tunnel == NULL)
1367 goto end_put_sess;
1368
1369 err = pppol2tp_tunnel_setsockopt(sk, tunnel, optname, val);
1370 sock_put(ps->tunnel_sock);
1371 } else
1372 err = pppol2tp_session_setsockopt(sk, session, optname, val);
1373
1374 /* keep err from the helper so an unknown option reports -ENOPROTOOPT */
1375
1376end_put_sess:
1377 sock_put(sk);
1378end:
1379 return err;
1380}
1381
1382/* Tunnel getsockopt helper. Called with sock locked.
1383 */
1384static int pppol2tp_tunnel_getsockopt(struct sock *sk,
1385 struct l2tp_tunnel *tunnel,
1386 int optname, int *val)
1387{
1388 int err = 0;
1389
1390 switch (optname) {
1391 case PPPOL2TP_SO_DEBUG:
1392 *val = tunnel->debug;
1393 PRINTK(tunnel->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1394 "%s: get debug=%x\n", tunnel->name, tunnel->debug);
1395 break;
1396
1397 default:
1398 err = -ENOPROTOOPT;
1399 break;
1400 }
1401
1402 return err;
1403}
1404
1405/* Session getsockopt helper. Called with sock locked.
1406 */
1407static int pppol2tp_session_getsockopt(struct sock *sk,
1408 struct l2tp_session *session,
1409 int optname, int *val)
1410{
1411 int err = 0;
1412
1413 switch (optname) {
1414 case PPPOL2TP_SO_RECVSEQ:
1415 *val = session->recv_seq;
1416 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1417 "%s: get recv_seq=%d\n", session->name, *val);
1418 break;
1419
1420 case PPPOL2TP_SO_SENDSEQ:
1421 *val = session->send_seq;
1422 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1423 "%s: get send_seq=%d\n", session->name, *val);
1424 break;
1425
1426 case PPPOL2TP_SO_LNSMODE:
1427 *val = session->lns_mode;
1428 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1429 "%s: get lns_mode=%d\n", session->name, *val);
1430 break;
1431
1432 case PPPOL2TP_SO_DEBUG:
1433 *val = session->debug;
1434 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1435 "%s: get debug=%d\n", session->name, *val);
1436 break;
1437
1438 case PPPOL2TP_SO_REORDERTO:
1439 *val = (int) jiffies_to_msecs(session->reorder_timeout);
1440 PRINTK(session->debug, PPPOL2TP_MSG_CONTROL, KERN_INFO,
1441 "%s: get reorder_timeout=%d\n", session->name, *val);
1442 break;
1443
1444 default:
1445 err = -ENOPROTOOPT;
1446 }
1447
1448 return err;
1449}
1450
1451/* Main getsockopt() entry point.
1452 * Does API checks, then calls either the tunnel or session getsockopt
1453 * handler, according to whether the PPPoX socket is for a regular session
1454 * or the special tunnel type.
1455 */
1456static int pppol2tp_getsockopt(struct socket *sock, int level,
1457 int optname, char __user *optval, int __user *optlen)
1458{
1459 struct sock *sk = sock->sk;
1460 struct l2tp_session *session;
1461 struct l2tp_tunnel *tunnel;
1462 int val, len;
1463 int err;
1464 struct pppol2tp_session *ps;
1465
1466 if (level != SOL_PPPOL2TP)
1467 return udp_prot.getsockopt(sk, level, optname, optval, optlen);
1468
1469 if (get_user(len, (int __user *) optlen))
1470 return -EFAULT;
1471
1472 len = min_t(unsigned int, len, sizeof(int));
1473
1474 if (len < 0)
1475 return -EINVAL;
1476
1477 err = -ENOTCONN;
1478 if (sk->sk_user_data == NULL)
1479 goto end;
1480
1481 /* Get the session context */
1482 err = -EBADF;
1483 session = pppol2tp_sock_to_session(sk);
1484 if (session == NULL)
1485 goto end;
1486
1487 /* Special case: if session_id == 0x0000, treat as operation on tunnel */
1488 ps = l2tp_session_priv(session);
1489 if ((session->session_id == 0) &&
1490 (session->peer_session_id == 0)) {
1491 err = -EBADF;
1492 tunnel = l2tp_sock_to_tunnel(ps->tunnel_sock);
1493 if (tunnel == NULL)
1494 goto end_put_sess;
1495
1496 err = pppol2tp_tunnel_getsockopt(sk, tunnel, optname, &val);
1497 sock_put(ps->tunnel_sock);
1498 } else
1499 err = pppol2tp_session_getsockopt(sk, session, optname, &val);
1500
1501 err = -EFAULT;
1502 if (put_user(len, (int __user *) optlen))
1503 goto end_put_sess;
1504
1505 if (copy_to_user((void __user *) optval, &val, len))
1506 goto end_put_sess;
1507
1508 err = 0;
1509
1510end_put_sess:
1511 sock_put(sk);
1512end:
1513 return err;
1514}
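
For orientation, a minimal userspace sketch of exercising the two entry
points above (a connected PX_PROTO_OL2TP socket fd, and the PPPOL2TP_SO_*
and SOL_PPPOL2TP constants from the kernel headers, are assumed; error
handling is trimmed):

	#include <sys/socket.h>
	#include <linux/if_pppol2tp.h>

	#ifndef SOL_PPPOL2TP
	#define SOL_PPPOL2TP 273	/* from linux/socket.h */
	#endif

	static int l2tp_set_send_seq(int fd, int on)
	{
		/* lands in pppol2tp_session_setsockopt() above */
		return setsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_SENDSEQ,
				  &on, sizeof(on));
	}

	static int l2tp_get_debug(int fd, int *mask)
	{
		socklen_t len = sizeof(*mask);

		/* lands in the tunnel helper when fd is the special
		 * tunnel socket (session_id == 0), else in the
		 * session helper */
		return getsockopt(fd, SOL_PPPOL2TP, PPPOL2TP_SO_DEBUG,
				  mask, &len);
	}
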
1515
1516/*****************************************************************************
1517 * /proc filesystem for debug
1518 * Since the original pppol2tp driver provided /proc/net/pppol2tp for
1519 * L2TPv2, we dump only L2TPv2 tunnels and sessions here.
1520 *****************************************************************************/
1521
1522static unsigned int pppol2tp_net_id;
1523
1524#ifdef CONFIG_PROC_FS
1525
1526struct pppol2tp_seq_data {
1527 struct seq_net_private p;
1528 int tunnel_idx; /* current tunnel */
1529 int session_idx; /* index of session within current tunnel */
1530 struct l2tp_tunnel *tunnel;
1531 struct l2tp_session *session; /* NULL means get next tunnel */
1532};
1533
1534static void pppol2tp_next_tunnel(struct net *net, struct pppol2tp_seq_data *pd)
1535{
1536 for (;;) {
1537 pd->tunnel = l2tp_tunnel_find_nth(net, pd->tunnel_idx);
1538 pd->tunnel_idx++;
1539
1540 if (pd->tunnel == NULL)
1541 break;
1542
1543 /* Ignore L2TPv3 tunnels */
1544 if (pd->tunnel->version < 3)
1545 break;
1546 }
1547}
1548
1549static void pppol2tp_next_session(struct net *net, struct pppol2tp_seq_data *pd)
1550{
1551 pd->session = l2tp_session_find_nth(pd->tunnel, pd->session_idx);
1552 pd->session_idx++;
1553
1554 if (pd->session == NULL) {
1555 pd->session_idx = 0;
1556 pppol2tp_next_tunnel(net, pd);
1557 }
1558}
1559
1560static void *pppol2tp_seq_start(struct seq_file *m, loff_t *offs)
1561{
1562 struct pppol2tp_seq_data *pd = SEQ_START_TOKEN;
1563 loff_t pos = *offs;
1564 struct net *net;
1565
1566 if (!pos)
1567 goto out;
1568
1569 BUG_ON(m->private == NULL);
1570 pd = m->private;
1571 net = seq_file_net(m);
1572
1573 if (pd->tunnel == NULL)
1574 pppol2tp_next_tunnel(net, pd);
1575 else
1576 pppol2tp_next_session(net, pd);
1577
1578 /* NULL tunnel and session indicates end of list */
1579 if ((pd->tunnel == NULL) && (pd->session == NULL))
1580 pd = NULL;
1581
1582out:
1583 return pd;
1584}
1585
1586static void *pppol2tp_seq_next(struct seq_file *m, void *v, loff_t *pos)
1587{
1588 (*pos)++;
1589 return NULL;
1590}
1591
1592static void pppol2tp_seq_stop(struct seq_file *p, void *v)
1593{
1594 /* nothing to do */
1595}
1596
1597static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)
1598{
1599 struct l2tp_tunnel *tunnel = v;
1600
1601 seq_printf(m, "\nTUNNEL '%s', %c %d\n",
1602 tunnel->name,
1603 (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',
1604 atomic_read(&tunnel->ref_count) - 1);
1605 seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n",
1606 tunnel->debug,
1607 (unsigned long long)tunnel->stats.tx_packets,
1608 (unsigned long long)tunnel->stats.tx_bytes,
1609 (unsigned long long)tunnel->stats.tx_errors,
1610 (unsigned long long)tunnel->stats.rx_packets,
1611 (unsigned long long)tunnel->stats.rx_bytes,
1612 (unsigned long long)tunnel->stats.rx_errors);
1613}
1614
1615static void pppol2tp_seq_session_show(struct seq_file *m, void *v)
1616{
1617 struct l2tp_session *session = v;
1618 struct l2tp_tunnel *tunnel = session->tunnel;
1619 struct pppol2tp_session *ps = l2tp_session_priv(session);
1620 struct pppox_sock *po = pppox_sk(ps->sock);
1621 u32 ip = 0;
1622 u16 port = 0;
1623
1624 if (tunnel->sock) {
1625 struct inet_sock *inet = inet_sk(tunnel->sock);
1626 ip = ntohl(inet->inet_saddr);
1627 port = ntohs(inet->inet_sport);
1628 }
1629
1630 seq_printf(m, " SESSION '%s' %08X/%d %04X/%04X -> "
1631 "%04X/%04X %d %c\n",
1632 session->name, ip, port,
1633 tunnel->tunnel_id,
1634 session->session_id,
1635 tunnel->peer_tunnel_id,
1636 session->peer_session_id,
1637 ps->sock->sk_state,
1638 (session == ps->sock->sk_user_data) ?
1639 'Y' : 'N');
1640 seq_printf(m, " %d/%d/%c/%c/%s %08x %u\n",
1641 session->mtu, session->mru,
1642 session->recv_seq ? 'R' : '-',
1643 session->send_seq ? 'S' : '-',
1644 session->lns_mode ? "LNS" : "LAC",
1645 session->debug,
1646 jiffies_to_msecs(session->reorder_timeout));
1647 seq_printf(m, " %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n",
1648 session->nr, session->ns,
1649 (unsigned long long)session->stats.tx_packets,
1650 (unsigned long long)session->stats.tx_bytes,
1651 (unsigned long long)session->stats.tx_errors,
1652 (unsigned long long)session->stats.rx_packets,
1653 (unsigned long long)session->stats.rx_bytes,
1654 (unsigned long long)session->stats.rx_errors);
1655
1656 if (po)
1657 seq_printf(m, " interface %s\n", ppp_dev_name(&po->chan));
1658}
1659
1660static int pppol2tp_seq_show(struct seq_file *m, void *v)
1661{
1662 struct pppol2tp_seq_data *pd = v;
1663
1664 /* display header on line 1 */
1665 if (v == SEQ_START_TOKEN) {
1666 seq_puts(m, "PPPoL2TP driver info, " PPPOL2TP_DRV_VERSION "\n");
1667 seq_puts(m, "TUNNEL name, user-data-ok session-count\n");
1668 seq_puts(m, " debug tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
1669 seq_puts(m, " SESSION name, addr/port src-tid/sid "
1670 "dest-tid/sid state user-data-ok\n");
1671 seq_puts(m, " mtu/mru/rcvseq/sendseq/lns debug reorderto\n");
1672 seq_puts(m, " nr/ns tx-pkts/bytes/errs rx-pkts/bytes/errs\n");
1673 goto out;
1674 }
1675
1676 /* Show the tunnel or session context.
1677 */
1678 if (pd->session == NULL)
1679 pppol2tp_seq_tunnel_show(m, pd->tunnel);
1680 else
1681 pppol2tp_seq_session_show(m, pd->session);
1682
1683out:
1684 return 0;
1685}
1686
1687static const struct seq_operations pppol2tp_seq_ops = {
1688 .start = pppol2tp_seq_start,
1689 .next = pppol2tp_seq_next,
1690 .stop = pppol2tp_seq_stop,
1691 .show = pppol2tp_seq_show,
1692};
1693
1694/* Called when our /proc file is opened. We allocate data for use when
1695 * iterating our tunnel / session contexts and store it in the private
1696 * data of the seq_file.
1697 */
1698static int pppol2tp_proc_open(struct inode *inode, struct file *file)
1699{
1700 return seq_open_net(inode, file, &pppol2tp_seq_ops,
1701 sizeof(struct pppol2tp_seq_data));
1702}
1703
1704static const struct file_operations pppol2tp_proc_fops = {
1705 .owner = THIS_MODULE,
1706 .open = pppol2tp_proc_open,
1707 .read = seq_read,
1708 .llseek = seq_lseek,
1709 .release = seq_release_net,
1710};
1711
1712#endif /* CONFIG_PROC_FS */
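
The table is plain text, so checking it from userspace needs nothing beyond
reading the file; a trivial sketch (the path comes from the
proc_net_fops_create() call below):

	#include <stdio.h>

	int main(void)
	{
		char line[256];
		FILE *f = fopen("/proc/net/pppol2tp", "r");

		if (!f) {
			perror("/proc/net/pppol2tp");
			return 1;
		}
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return 0;
	}
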
1713
1714/*****************************************************************************
1715 * Network namespace
1716 *****************************************************************************/
1717
1718static __net_init int pppol2tp_init_net(struct net *net)
1719{
1720 struct proc_dir_entry *pde;
1721 int err = 0;
1722
1723 pde = proc_net_fops_create(net, "pppol2tp", S_IRUGO, &pppol2tp_proc_fops);
1724 if (!pde) {
1725 err = -ENOMEM;
1726 goto out;
1727 }
1728
1729out:
1730 return err;
1731}
1732
1733static __net_exit void pppol2tp_exit_net(struct net *net)
1734{
1735 proc_net_remove(net, "pppol2tp");
1736}
1737
1738static struct pernet_operations pppol2tp_net_ops = {
1739 .init = pppol2tp_init_net,
1740 .exit = pppol2tp_exit_net,
1741 .id = &pppol2tp_net_id,
1742};
1743
1744/*****************************************************************************
1745 * Init and cleanup
1746 *****************************************************************************/
1747
1748static const struct proto_ops pppol2tp_ops = {
1749 .family = AF_PPPOX,
1750 .owner = THIS_MODULE,
1751 .release = pppol2tp_release,
1752 .bind = sock_no_bind,
1753 .connect = pppol2tp_connect,
1754 .socketpair = sock_no_socketpair,
1755 .accept = sock_no_accept,
1756 .getname = pppol2tp_getname,
1757 .poll = datagram_poll,
1758 .listen = sock_no_listen,
1759 .shutdown = sock_no_shutdown,
1760 .setsockopt = pppol2tp_setsockopt,
1761 .getsockopt = pppol2tp_getsockopt,
1762 .sendmsg = pppol2tp_sendmsg,
1763 .recvmsg = pppol2tp_recvmsg,
1764 .mmap = sock_no_mmap,
1765 .ioctl = pppox_ioctl,
1766};
1767
1768static struct pppox_proto pppol2tp_proto = {
1769 .create = pppol2tp_create,
1770 .ioctl = pppol2tp_ioctl
1771};
1772
1773#ifdef CONFIG_L2TP_V3
1774
1775static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {
1776 .session_create = pppol2tp_session_create,
1777 .session_delete = pppol2tp_session_delete,
1778};
1779
1780#endif /* CONFIG_L2TP_V3 */
1781
1782static int __init pppol2tp_init(void)
1783{
1784 int err;
1785
1786 err = register_pernet_device(&pppol2tp_net_ops);
1787 if (err)
1788 goto out;
1789
1790 err = proto_register(&pppol2tp_sk_proto, 0);
1791 if (err)
1792 goto out_unregister_pppol2tp_pernet;
1793
1794 err = register_pppox_proto(PX_PROTO_OL2TP, &pppol2tp_proto);
1795 if (err)
1796 goto out_unregister_pppol2tp_proto;
1797
1798#ifdef CONFIG_L2TP_V3
1799 err = l2tp_nl_register_ops(L2TP_PWTYPE_PPP, &pppol2tp_nl_cmd_ops);
1800 if (err)
1801 goto out_unregister_pppox;
1802#endif
1803
1804 printk(KERN_INFO "PPPoL2TP kernel driver, %s\n",
1805 PPPOL2TP_DRV_VERSION);
1806
1807out:
1808 return err;
1809
1810#ifdef CONFIG_L2TP_V3
1811out_unregister_pppox:
1812 unregister_pppox_proto(PX_PROTO_OL2TP);
1813#endif
1814out_unregister_pppol2tp_proto:
1815 proto_unregister(&pppol2tp_sk_proto);
1816out_unregister_pppol2tp_pernet:
1817 unregister_pernet_device(&pppol2tp_net_ops);
1818 goto out;
1819}
1820
1821static void __exit pppol2tp_exit(void)
1822{
1823#ifdef CONFIG_L2TP_V3
1824 l2tp_nl_unregister_ops(L2TP_PWTYPE_PPP);
1825#endif
1826 unregister_pppox_proto(PX_PROTO_OL2TP);
1827 proto_unregister(&pppol2tp_sk_proto);
1828 unregister_pernet_device(&pppol2tp_net_ops);
1829}
1830
1831module_init(pppol2tp_init);
1832module_exit(pppol2tp_exit);
1833
1834MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
1835MODULE_DESCRIPTION("PPP over L2TP over UDP");
1836MODULE_LICENSE("GPL");
1837MODULE_VERSION(PPPOL2TP_DRV_VERSION);
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 2db6a9f75913..023ba820236f 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -536,7 +536,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
536 int rc = 0; 536 int rc = 0;
537 537
538 while (1) { 538 while (1) {
539 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 539 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
540 if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE)) 540 if (sk_wait_event(sk, &timeout, sk->sk_state == TCP_CLOSE))
541 break; 541 break;
542 rc = -ERESTARTSYS; 542 rc = -ERESTARTSYS;
@@ -547,7 +547,7 @@ static int llc_ui_wait_for_disc(struct sock *sk, long timeout)
547 break; 547 break;
548 rc = 0; 548 rc = 0;
549 } 549 }
550 finish_wait(sk->sk_sleep, &wait); 550 finish_wait(sk_sleep(sk), &wait);
551 return rc; 551 return rc;
552} 552}
553 553
@@ -556,13 +556,13 @@ static int llc_ui_wait_for_conn(struct sock *sk, long timeout)
556 DEFINE_WAIT(wait); 556 DEFINE_WAIT(wait);
557 557
558 while (1) { 558 while (1) {
559 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 559 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
560 if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT)) 560 if (sk_wait_event(sk, &timeout, sk->sk_state != TCP_SYN_SENT))
561 break; 561 break;
562 if (signal_pending(current) || !timeout) 562 if (signal_pending(current) || !timeout)
563 break; 563 break;
564 } 564 }
565 finish_wait(sk->sk_sleep, &wait); 565 finish_wait(sk_sleep(sk), &wait);
566 return timeout; 566 return timeout;
567} 567}
568 568
@@ -573,7 +573,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
573 int rc; 573 int rc;
574 574
575 while (1) { 575 while (1) {
576 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 576 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
577 rc = 0; 577 rc = 0;
578 if (sk_wait_event(sk, &timeout, 578 if (sk_wait_event(sk, &timeout,
579 (sk->sk_shutdown & RCV_SHUTDOWN) || 579 (sk->sk_shutdown & RCV_SHUTDOWN) ||
@@ -588,7 +588,7 @@ static int llc_ui_wait_for_busy_core(struct sock *sk, long timeout)
588 if (!timeout) 588 if (!timeout)
589 break; 589 break;
590 } 590 }
591 finish_wait(sk->sk_sleep, &wait); 591 finish_wait(sk_sleep(sk), &wait);
592 return rc; 592 return rc;
593} 593}
594 594
diff --git a/net/llc/llc_core.c b/net/llc/llc_core.c
index 78167e81dfeb..2bb0ddff8c0f 100644
--- a/net/llc/llc_core.c
+++ b/net/llc/llc_core.c
@@ -144,12 +144,6 @@ static struct packet_type llc_tr_packet_type __read_mostly = {
144 144
145static int __init llc_init(void) 145static int __init llc_init(void)
146{ 146{
147 struct net_device *dev;
148
149 dev = first_net_device(&init_net);
150 if (dev != NULL)
151 dev = next_net_device(dev);
152
153 dev_add_pack(&llc_packet_type); 147 dev_add_pack(&llc_packet_type);
154 dev_add_pack(&llc_tr_packet_type); 148 dev_add_pack(&llc_tr_packet_type);
155 return 0; 149 return 0;
diff --git a/net/mac80211/Kconfig b/net/mac80211/Kconfig
index a952b7f8c648..8a91f6c0bb18 100644
--- a/net/mac80211/Kconfig
+++ b/net/mac80211/Kconfig
@@ -15,8 +15,12 @@ comment "CFG80211 needs to be enabled for MAC80211"
15 15
16if MAC80211 != n 16if MAC80211 != n
17 17
18config MAC80211_HAS_RC
19 def_bool n
20
18config MAC80211_RC_PID 21config MAC80211_RC_PID
19 bool "PID controller based rate control algorithm" if EMBEDDED 22 bool "PID controller based rate control algorithm" if EMBEDDED
23 select MAC80211_HAS_RC
20 ---help--- 24 ---help---
21 This option enables a TX rate control algorithm for 25 This option enables a TX rate control algorithm for
22 mac80211 that uses a PID controller to select the TX 26 mac80211 that uses a PID controller to select the TX
@@ -24,12 +28,14 @@ config MAC80211_RC_PID
24 28
25config MAC80211_RC_MINSTREL 29config MAC80211_RC_MINSTREL
26 bool "Minstrel" if EMBEDDED 30 bool "Minstrel" if EMBEDDED
31 select MAC80211_HAS_RC
27 default y 32 default y
28 ---help--- 33 ---help---
29 This option enables the 'minstrel' TX rate control algorithm 34 This option enables the 'minstrel' TX rate control algorithm
30 35
31choice 36choice
32 prompt "Default rate control algorithm" 37 prompt "Default rate control algorithm"
38 depends on MAC80211_HAS_RC
33 default MAC80211_RC_DEFAULT_MINSTREL 39 default MAC80211_RC_DEFAULT_MINSTREL
34 ---help--- 40 ---help---
35 This option selects the default rate control algorithm 41 This option selects the default rate control algorithm
@@ -62,6 +68,9 @@ config MAC80211_RC_DEFAULT
62 68
63endif 69endif
64 70
71comment "Some wireless drivers require a rate control algorithm"
72 depends on MAC80211_HAS_RC=n
73
65config MAC80211_MESH 74config MAC80211_MESH
66 bool "Enable mac80211 mesh networking (pre-802.11s) support" 75 bool "Enable mac80211 mesh networking (pre-802.11s) support"
67 depends on MAC80211 && EXPERIMENTAL 76 depends on MAC80211 && EXPERIMENTAL
@@ -212,8 +221,8 @@ config MAC80211_DRIVER_API_TRACER
212 depends on EVENT_TRACING 221 depends on EVENT_TRACING
213 help 222 help
214 Say Y here to make mac80211 register with the ftrace 223 Say Y here to make mac80211 register with the ftrace
215 framework for the driver API -- you can see which 224 framework for the driver API -- you can then see which
216 driver methods it is calling then by looking at the 225 driver methods it is calling and which API functions
217 trace. 226 drivers are calling by looking at the trace.
218 227
219 If unsure, say N. 228 If unsure, say Y.
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index f9516a27e233..9598fdb4ad01 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -23,19 +23,20 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
23 u16 initiator, u16 reason) 23 u16 initiator, u16 reason)
24{ 24{
25 struct ieee80211_local *local = sta->local; 25 struct ieee80211_local *local = sta->local;
26 struct tid_ampdu_rx *tid_rx;
26 int i; 27 int i;
27 28
28 /* check if TID is in operational state */
29 spin_lock_bh(&sta->lock); 29 spin_lock_bh(&sta->lock);
30 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL) { 30
31 /* check if TID is in operational state */
32 if (!sta->ampdu_mlme.tid_active_rx[tid]) {
31 spin_unlock_bh(&sta->lock); 33 spin_unlock_bh(&sta->lock);
32 return; 34 return;
33 } 35 }
34 36
35 sta->ampdu_mlme.tid_state_rx[tid] = 37 sta->ampdu_mlme.tid_active_rx[tid] = false;
36 HT_AGG_STATE_REQ_STOP_BA_MSK | 38
37 (initiator << HT_AGG_STATE_INITIATOR_SHIFT); 39 tid_rx = sta->ampdu_mlme.tid_rx[tid];
38 spin_unlock_bh(&sta->lock);
39 40
40#ifdef CONFIG_MAC80211_HT_DEBUG 41#ifdef CONFIG_MAC80211_HT_DEBUG
41 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n", 42 printk(KERN_DEBUG "Rx BA session stop requested for %pM tid %u\n",
@@ -47,61 +48,35 @@ void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
47 printk(KERN_DEBUG "HW problem - can not stop rx " 48 printk(KERN_DEBUG "HW problem - can not stop rx "
48 "aggregation for tid %d\n", tid); 49 "aggregation for tid %d\n", tid);
49 50
50 /* shutdown timer has not expired */
51 if (initiator != WLAN_BACK_TIMER)
52 del_timer_sync(&sta->ampdu_mlme.tid_rx[tid]->session_timer);
53
54 /* check if this is a self generated aggregation halt */ 51 /* check if this is a self generated aggregation halt */
55 if (initiator == WLAN_BACK_RECIPIENT || initiator == WLAN_BACK_TIMER) 52 if (initiator == WLAN_BACK_RECIPIENT)
56 ieee80211_send_delba(sta->sdata, sta->sta.addr, 53 ieee80211_send_delba(sta->sdata, sta->sta.addr,
57 tid, 0, reason); 54 tid, 0, reason);
58 55
59 /* free the reordering buffer */ 56 /* free the reordering buffer */
60 for (i = 0; i < sta->ampdu_mlme.tid_rx[tid]->buf_size; i++) { 57 for (i = 0; i < tid_rx->buf_size; i++) {
61 if (sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]) { 58 if (tid_rx->reorder_buf[i]) {
62 /* release the reordered frames */ 59 /* release the reordered frames */
63 dev_kfree_skb(sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i]); 60 dev_kfree_skb(tid_rx->reorder_buf[i]);
64 sta->ampdu_mlme.tid_rx[tid]->stored_mpdu_num--; 61 tid_rx->stored_mpdu_num--;
65 sta->ampdu_mlme.tid_rx[tid]->reorder_buf[i] = NULL; 62 tid_rx->reorder_buf[i] = NULL;
66 } 63 }
67 } 64 }
68 65
69 spin_lock_bh(&sta->lock);
70 /* free resources */ 66 /* free resources */
71 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_buf); 67 kfree(tid_rx->reorder_buf);
72 kfree(sta->ampdu_mlme.tid_rx[tid]->reorder_time); 68 kfree(tid_rx->reorder_time);
69 sta->ampdu_mlme.tid_rx[tid] = NULL;
73 70
74 if (!sta->ampdu_mlme.tid_rx[tid]->shutdown) {
75 kfree(sta->ampdu_mlme.tid_rx[tid]);
76 sta->ampdu_mlme.tid_rx[tid] = NULL;
77 }
78
79 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_IDLE;
80 spin_unlock_bh(&sta->lock); 71 spin_unlock_bh(&sta->lock);
81}
82
83void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid,
84 u16 initiator, u16 reason)
85{
86 struct sta_info *sta;
87
88 rcu_read_lock();
89
90 sta = sta_info_get(sdata, ra);
91 if (!sta) {
92 rcu_read_unlock();
93 return;
94 }
95
96 __ieee80211_stop_rx_ba_session(sta, tid, initiator, reason);
97 72
98 rcu_read_unlock(); 73 del_timer_sync(&tid_rx->session_timer);
74 kfree(tid_rx);
99} 75}
100 76
101/* 77/*
102 * After accepting the AddBA Request we activated a timer, 78 * After accepting the AddBA Request we activated a timer,
103 * resetting it after each frame that arrives from the originator. 79 * resetting it after each frame that arrives from the originator.
104 * if this timer expires ieee80211_sta_stop_rx_ba_session will be executed.
105 */ 80 */
106static void sta_rx_agg_session_timer_expired(unsigned long data) 81static void sta_rx_agg_session_timer_expired(unsigned long data)
107{ 82{
@@ -117,9 +92,8 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
117#ifdef CONFIG_MAC80211_HT_DEBUG 92#ifdef CONFIG_MAC80211_HT_DEBUG
118 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 93 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
119#endif 94#endif
120 ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr, 95 __ieee80211_stop_rx_ba_session(sta, *ptid, WLAN_BACK_RECIPIENT,
121 (u16)*ptid, WLAN_BACK_TIMER, 96 WLAN_REASON_QSTA_TIMEOUT);
122 WLAN_REASON_QSTA_TIMEOUT);
123} 97}
124 98
125static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, 99static void ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid,
@@ -194,7 +168,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
194 168
195 status = WLAN_STATUS_REQUEST_DECLINED; 169 status = WLAN_STATUS_REQUEST_DECLINED;
196 170
197 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { 171 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
198#ifdef CONFIG_MAC80211_HT_DEBUG 172#ifdef CONFIG_MAC80211_HT_DEBUG
199 printk(KERN_DEBUG "Suspend in progress. " 173 printk(KERN_DEBUG "Suspend in progress. "
200 "Denying ADDBA request\n"); 174 "Denying ADDBA request\n");
@@ -232,7 +206,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
232 /* examine state machine */ 206 /* examine state machine */
233 spin_lock_bh(&sta->lock); 207 spin_lock_bh(&sta->lock);
234 208
235 if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_IDLE) { 209 if (sta->ampdu_mlme.tid_active_rx[tid]) {
236#ifdef CONFIG_MAC80211_HT_DEBUG 210#ifdef CONFIG_MAC80211_HT_DEBUG
237 if (net_ratelimit()) 211 if (net_ratelimit())
238 printk(KERN_DEBUG "unexpected AddBA Req from " 212 printk(KERN_DEBUG "unexpected AddBA Req from "
@@ -294,7 +268,7 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
294 } 268 }
295 269
296 /* change state and send addba resp */ 270 /* change state and send addba resp */
297 sta->ampdu_mlme.tid_state_rx[tid] = HT_AGG_STATE_OPERATIONAL; 271 sta->ampdu_mlme.tid_active_rx[tid] = true;
298 tid_agg_rx->dialog_token = dialog_token; 272 tid_agg_rx->dialog_token = dialog_token;
299 tid_agg_rx->ssn = start_seq_num; 273 tid_agg_rx->ssn = start_seq_num;
300 tid_agg_rx->head_seq_num = start_seq_num; 274 tid_agg_rx->head_seq_num = start_seq_num;
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 87782a4bb541..555c6a14a6fa 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -214,6 +214,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
214 int ret = 0; 214 int ret = 0;
215 u16 start_seq_num; 215 u16 start_seq_num;
216 216
217 trace_api_start_tx_ba_session(pubsta, tid);
218
217 if (WARN_ON(!local->ops->ampdu_action)) 219 if (WARN_ON(!local->ops->ampdu_action))
218 return -EINVAL; 220 return -EINVAL;
219 221
@@ -245,7 +247,7 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid)
245 return -EINVAL; 247 return -EINVAL;
246 } 248 }
247 249
248 if (test_sta_flags(sta, WLAN_STA_SUSPEND)) { 250 if (test_sta_flags(sta, WLAN_STA_BLOCK_BA)) {
249#ifdef CONFIG_MAC80211_HT_DEBUG 251#ifdef CONFIG_MAC80211_HT_DEBUG
250 printk(KERN_DEBUG "Suspend in progress. " 252 printk(KERN_DEBUG "Suspend in progress. "
251 "Denying BA session request\n"); 253 "Denying BA session request\n");
@@ -414,7 +416,7 @@ static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
414 struct sta_info *sta, u16 tid) 416 struct sta_info *sta, u16 tid)
415{ 417{
416#ifdef CONFIG_MAC80211_HT_DEBUG 418#ifdef CONFIG_MAC80211_HT_DEBUG
417 printk(KERN_DEBUG "Aggregation is on for tid %d \n", tid); 419 printk(KERN_DEBUG "Aggregation is on for tid %d\n", tid);
418#endif 420#endif
419 421
420 spin_lock(&local->ampdu_lock); 422 spin_lock(&local->ampdu_lock);
@@ -440,6 +442,8 @@ void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
440 struct sta_info *sta; 442 struct sta_info *sta;
441 u8 *state; 443 u8 *state;
442 444
445 trace_api_start_tx_ba_cb(sdata, ra, tid);
446
443 if (tid >= STA_TID_NUM) { 447 if (tid >= STA_TID_NUM) {
444#ifdef CONFIG_MAC80211_HT_DEBUG 448#ifdef CONFIG_MAC80211_HT_DEBUG
445 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", 449 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
@@ -541,6 +545,8 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
541 struct ieee80211_sub_if_data *sdata = sta->sdata; 545 struct ieee80211_sub_if_data *sdata = sta->sdata;
542 struct ieee80211_local *local = sdata->local; 546 struct ieee80211_local *local = sdata->local;
543 547
548 trace_api_stop_tx_ba_session(pubsta, tid, initiator);
549
544 if (!local->ops->ampdu_action) 550 if (!local->ops->ampdu_action)
545 return -EINVAL; 551 return -EINVAL;
546 552
@@ -558,6 +564,8 @@ void ieee80211_stop_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u8 tid)
558 struct sta_info *sta; 564 struct sta_info *sta;
559 u8 *state; 565 u8 *state;
560 566
567 trace_api_stop_tx_ba_cb(sdata, ra, tid);
568
561 if (tid >= STA_TID_NUM) { 569 if (tid >= STA_TID_NUM) {
562#ifdef CONFIG_MAC80211_HT_DEBUG 570#ifdef CONFIG_MAC80211_HT_DEBUG
563 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n", 571 printk(KERN_DEBUG "Bad TID value: tid = %d (>= %d)\n",
@@ -674,7 +682,7 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
674 del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer); 682 del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
675 683
676#ifdef CONFIG_MAC80211_HT_DEBUG 684#ifdef CONFIG_MAC80211_HT_DEBUG
677 printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid); 685 printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
678#endif /* CONFIG_MAC80211_HT_DEBUG */ 686#endif /* CONFIG_MAC80211_HT_DEBUG */
679 687
680 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status) 688 if (le16_to_cpu(mgmt->u.action.u.addba_resp.status)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index edc872e22c9b..7dd7cda75cfa 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1137,6 +1137,10 @@ static int ieee80211_set_txq_params(struct wiphy *wiphy,
1137 return -EINVAL; 1137 return -EINVAL;
1138 } 1138 }
1139 1139
1140 /* enable WMM or activate new settings */
1141 local->hw.conf.flags |= IEEE80211_CONF_QOS;
1142 drv_config(local, IEEE80211_CONF_CHANGE_QOS);
1143
1140 return 0; 1144 return 0;
1141} 1145}
1142 1146
@@ -1403,6 +1407,35 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
1403 return 0; 1407 return 0;
1404} 1408}
1405 1409
1410static int ieee80211_set_cqm_rssi_config(struct wiphy *wiphy,
1411 struct net_device *dev,
1412 s32 rssi_thold, u32 rssi_hyst)
1413{
1414 struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
1415 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
1416 struct ieee80211_vif *vif = &sdata->vif;
1417 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1418
1419 if (rssi_thold == bss_conf->cqm_rssi_thold &&
1420 rssi_hyst == bss_conf->cqm_rssi_hyst)
1421 return 0;
1422
1423 bss_conf->cqm_rssi_thold = rssi_thold;
1424 bss_conf->cqm_rssi_hyst = rssi_hyst;
1425
1426 if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
1427 if (sdata->vif.type != NL80211_IFTYPE_STATION)
1428 return -EOPNOTSUPP;
1429 return 0;
1430 }
1431
1432 /* tell the driver upon association, unless already associated */
1433 if (sdata->u.mgd.associated)
1434 ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_CQM);
1435
1436 return 0;
1437}
1438
1406static int ieee80211_set_bitrate_mask(struct wiphy *wiphy, 1439static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
1407 struct net_device *dev, 1440 struct net_device *dev,
1408 const u8 *addr, 1441 const u8 *addr,
@@ -1507,4 +1540,5 @@ struct cfg80211_ops mac80211_config_ops = {
1507 .remain_on_channel = ieee80211_remain_on_channel, 1540 .remain_on_channel = ieee80211_remain_on_channel,
1508 .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel, 1541 .cancel_remain_on_channel = ieee80211_cancel_remain_on_channel,
1509 .action = ieee80211_action, 1542 .action = ieee80211_action,
1543 .set_cqm_rssi_config = ieee80211_set_cqm_rssi_config,
1510}; 1544};
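
The new set_cqm_rssi_config() above only stores the threshold/hysteresis
pair; the events themselves are raised from the RX path as beacons move the
signal average. A self-contained sketch of the kind of
threshold-plus-hysteresis check involved (names and exact comparisons are
illustrative, not the mlme code):

	enum cqm_event { CQM_NONE, CQM_RSSI_LOW, CQM_RSSI_HIGH };

	/* last_event_avg is the average that fired the previous event,
	 * 0 if none yet; the hysteresis keeps a wobbling average from
	 * generating an event on every beacon. */
	static enum cqm_event cqm_check(int avg, int thold, int hyst,
					int last_event_avg)
	{
		if (avg < thold &&
		    (!last_event_avg || avg < last_event_avg - hyst))
			return CQM_RSSI_LOW;
		if (avg > thold &&
		    (!last_event_avg || avg > last_event_avg + hyst))
			return CQM_RSSI_HIGH;
		return CQM_NONE;
	}
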
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 83d4289d954b..20b2998fa0ed 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -100,6 +100,14 @@ static ssize_t ieee80211_if_fmt_##name( \
100 return scnprintf(buf, buflen, "%pM\n", sdata->field); \ 100 return scnprintf(buf, buflen, "%pM\n", sdata->field); \
101} 101}
102 102
103#define IEEE80211_IF_FMT_DEC_DIV_16(name, field) \
104static ssize_t ieee80211_if_fmt_##name( \
105 const struct ieee80211_sub_if_data *sdata, \
106 char *buf, int buflen) \
107{ \
108 return scnprintf(buf, buflen, "%d\n", sdata->field / 16); \
109}
110
103#define __IEEE80211_IF_FILE(name, _write) \ 111#define __IEEE80211_IF_FILE(name, _write) \
104static ssize_t ieee80211_if_read_##name(struct file *file, \ 112static ssize_t ieee80211_if_read_##name(struct file *file, \
105 char __user *userbuf, \ 113 char __user *userbuf, \
@@ -140,6 +148,8 @@ IEEE80211_IF_FILE(rc_rateidx_mask_5ghz, rc_rateidx_mask[IEEE80211_BAND_5GHZ],
140/* STA attributes */ 148/* STA attributes */
141IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC); 149IEEE80211_IF_FILE(bssid, u.mgd.bssid, MAC);
142IEEE80211_IF_FILE(aid, u.mgd.aid, DEC); 150IEEE80211_IF_FILE(aid, u.mgd.aid, DEC);
151IEEE80211_IF_FILE(last_beacon, u.mgd.last_beacon_signal, DEC);
152IEEE80211_IF_FILE(ave_beacon, u.mgd.ave_beacon_signal, DEC_DIV_16);
143 153
144static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata, 154static int ieee80211_set_smps(struct ieee80211_sub_if_data *sdata,
145 enum ieee80211_smps_mode smps_mode) 155 enum ieee80211_smps_mode smps_mode)
@@ -276,6 +286,8 @@ static void add_sta_files(struct ieee80211_sub_if_data *sdata)
276 286
277 DEBUGFS_ADD(bssid); 287 DEBUGFS_ADD(bssid);
278 DEBUGFS_ADD(aid); 288 DEBUGFS_ADD(aid);
289 DEBUGFS_ADD(last_beacon);
290 DEBUGFS_ADD(ave_beacon);
279 DEBUGFS_ADD_MODE(smps, 0600); 291 DEBUGFS_ADD_MODE(smps, 0600);
280} 292}
281 293
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index d92800bb2d2f..6bc9b07c3eda 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -57,7 +57,6 @@ STA_FILE(tx_filtered, tx_filtered_count, LU);
57STA_FILE(tx_retry_failed, tx_retry_failed, LU); 57STA_FILE(tx_retry_failed, tx_retry_failed, LU);
58STA_FILE(tx_retry_count, tx_retry_count, LU); 58STA_FILE(tx_retry_count, tx_retry_count, LU);
59STA_FILE(last_signal, last_signal, D); 59STA_FILE(last_signal, last_signal, D);
60STA_FILE(last_noise, last_noise, D);
61STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU); 60STA_FILE(wep_weak_iv_count, wep_weak_iv_count, LU);
62 61
63static ssize_t sta_flags_read(struct file *file, char __user *userbuf, 62static ssize_t sta_flags_read(struct file *file, char __user *userbuf,
@@ -120,7 +119,7 @@ STA_OPS(last_seq_ctrl);
120static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf, 119static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
121 size_t count, loff_t *ppos) 120 size_t count, loff_t *ppos)
122{ 121{
123 char buf[64 + STA_TID_NUM * 40], *p = buf; 122 char buf[71 + STA_TID_NUM * 40], *p = buf;
124 int i; 123 int i;
125 struct sta_info *sta = file->private_data; 124 struct sta_info *sta = file->private_data;
126 125
@@ -128,16 +127,16 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
128 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n", 127 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
129 sta->ampdu_mlme.dialog_token_allocator + 1); 128 sta->ampdu_mlme.dialog_token_allocator + 1);
130 p += scnprintf(p, sizeof(buf) + buf - p, 129 p += scnprintf(p, sizeof(buf) + buf - p,
131 "TID\t\tRX\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n"); 130 "TID\t\tRX active\tDTKN\tSSN\t\tTX\tDTKN\tSSN\tpending\n");
132 for (i = 0; i < STA_TID_NUM; i++) { 131 for (i = 0; i < STA_TID_NUM; i++) {
133 p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i); 132 p += scnprintf(p, sizeof(buf) + buf - p, "%02d", i);
134 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", 133 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
135 sta->ampdu_mlme.tid_state_rx[i]); 134 sta->ampdu_mlme.tid_active_rx[i]);
136 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x", 135 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.2x",
137 sta->ampdu_mlme.tid_state_rx[i] ? 136 sta->ampdu_mlme.tid_active_rx[i] ?
138 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0); 137 sta->ampdu_mlme.tid_rx[i]->dialog_token : 0);
139 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x", 138 p += scnprintf(p, sizeof(buf) + buf - p, "\t%#.3x",
140 sta->ampdu_mlme.tid_state_rx[i] ? 139 sta->ampdu_mlme.tid_active_rx[i] ?
141 sta->ampdu_mlme.tid_rx[i]->ssn : 0); 140 sta->ampdu_mlme.tid_rx[i]->ssn : 0);
142 141
143 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x", 142 p += scnprintf(p, sizeof(buf) + buf - p, "\t\t%x",
@@ -177,7 +176,7 @@ static ssize_t sta_ht_capa_read(struct file *file, char __user *userbuf,
177 if (htc->ht_supported) { 176 if (htc->ht_supported) {
178 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap); 177 p += scnprintf(p, sizeof(buf)+buf-p, "cap: %#.4x\n", htc->cap);
179 178
180 PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDCP"); 179 PRINT_HT_CAP((htc->cap & BIT(0)), "RX LDPC");
181 PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40"); 180 PRINT_HT_CAP((htc->cap & BIT(1)), "HT20/HT40");
182 PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20"); 181 PRINT_HT_CAP(!(htc->cap & BIT(1)), "HT20");
183 182
@@ -289,7 +288,6 @@ void ieee80211_sta_debugfs_add(struct sta_info *sta)
289 DEBUGFS_ADD(tx_retry_failed); 288 DEBUGFS_ADD(tx_retry_failed);
290 DEBUGFS_ADD(tx_retry_count); 289 DEBUGFS_ADD(tx_retry_count);
291 DEBUGFS_ADD(last_signal); 290 DEBUGFS_ADD(last_signal);
292 DEBUGFS_ADD(last_noise);
293 DEBUGFS_ADD(wep_weak_iv_count); 291 DEBUGFS_ADD(wep_weak_iv_count);
294 DEBUGFS_ADD(ht_capa); 292 DEBUGFS_ADD(ht_capa);
295} 293}
diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
index c3d844093a2f..9179196da264 100644
--- a/net/mac80211/driver-ops.h
+++ b/net/mac80211/driver-ops.h
@@ -84,16 +84,14 @@ static inline void drv_bss_info_changed(struct ieee80211_local *local,
84} 84}
85 85
86static inline u64 drv_prepare_multicast(struct ieee80211_local *local, 86static inline u64 drv_prepare_multicast(struct ieee80211_local *local,
87 int mc_count, 87 struct netdev_hw_addr_list *mc_list)
88 struct dev_addr_list *mc_list)
89{ 88{
90 u64 ret = 0; 89 u64 ret = 0;
91 90
92 if (local->ops->prepare_multicast) 91 if (local->ops->prepare_multicast)
93 ret = local->ops->prepare_multicast(&local->hw, mc_count, 92 ret = local->ops->prepare_multicast(&local->hw, mc_list);
94 mc_list);
95 93
96 trace_drv_prepare_multicast(local, mc_count, ret); 94 trace_drv_prepare_multicast(local, mc_list->count, ret);
97 95
98 return ret; 96 return ret;
99} 97}
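
With this change a driver's prepare_multicast() walks a netdev_hw_addr_list
instead of taking a separate count plus dev_addr_list chain; a sketch of the
driver side (the "foo" driver and its toy hash are hypothetical):

	static u64 foo_prepare_multicast(struct ieee80211_hw *hw,
					 struct netdev_hw_addr_list *mc_list)
	{
		struct netdev_hw_addr *ha;
		u64 hash = 0;

		/* mc_list->count replaces the old mc_count argument */
		netdev_hw_addr_list_for_each(ha, mc_list)
			hash |= 1ULL << (ha->addr[5] & 0x3f);

		return hash;	/* opaque value handed to configure_filter() */
	}
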
diff --git a/net/mac80211/driver-trace.h b/net/mac80211/driver-trace.h
index 41baf730a5c7..e209cb82ff29 100644
--- a/net/mac80211/driver-trace.h
+++ b/net/mac80211/driver-trace.h
@@ -32,6 +32,10 @@ static inline void trace_ ## name(proto) {}
32#define VIF_PR_FMT " vif:%s(%d)" 32#define VIF_PR_FMT " vif:%s(%d)"
33#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type 33#define VIF_PR_ARG __get_str(vif_name), __entry->vif_type
34 34
35/*
36 * Tracing for driver callbacks.
37 */
38
35TRACE_EVENT(drv_start, 39TRACE_EVENT(drv_start,
36 TP_PROTO(struct ieee80211_local *local, int ret), 40 TP_PROTO(struct ieee80211_local *local, int ret),
37 41
@@ -766,6 +770,277 @@ TRACE_EVENT(drv_flush,
766 LOCAL_PR_ARG, __entry->drop 770 LOCAL_PR_ARG, __entry->drop
767 ) 771 )
768); 772);
773
774/*
775 * Tracing for API calls that drivers call.
776 */
777
778TRACE_EVENT(api_start_tx_ba_session,
779 TP_PROTO(struct ieee80211_sta *sta, u16 tid),
780
781 TP_ARGS(sta, tid),
782
783 TP_STRUCT__entry(
784 STA_ENTRY
785 __field(u16, tid)
786 ),
787
788 TP_fast_assign(
789 STA_ASSIGN;
790 __entry->tid = tid;
791 ),
792
793 TP_printk(
794 STA_PR_FMT " tid:%d",
795 STA_PR_ARG, __entry->tid
796 )
797);
798
799TRACE_EVENT(api_start_tx_ba_cb,
800 TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
801
802 TP_ARGS(sdata, ra, tid),
803
804 TP_STRUCT__entry(
805 VIF_ENTRY
806 __array(u8, ra, ETH_ALEN)
807 __field(u16, tid)
808 ),
809
810 TP_fast_assign(
811 VIF_ASSIGN;
812 memcpy(__entry->ra, ra, ETH_ALEN);
813 __entry->tid = tid;
814 ),
815
816 TP_printk(
817 VIF_PR_FMT " ra:%pM tid:%d",
818 VIF_PR_ARG, __entry->ra, __entry->tid
819 )
820);
821
822TRACE_EVENT(api_stop_tx_ba_session,
823 TP_PROTO(struct ieee80211_sta *sta, u16 tid, u16 initiator),
824
825 TP_ARGS(sta, tid, initiator),
826
827 TP_STRUCT__entry(
828 STA_ENTRY
829 __field(u16, tid)
830 __field(u16, initiator)
831 ),
832
833 TP_fast_assign(
834 STA_ASSIGN;
835 __entry->tid = tid;
836 __entry->initiator = initiator;
837 ),
838
839 TP_printk(
840 STA_PR_FMT " tid:%d initiator:%d",
841 STA_PR_ARG, __entry->tid, __entry->initiator
842 )
843);
844
845TRACE_EVENT(api_stop_tx_ba_cb,
846 TP_PROTO(struct ieee80211_sub_if_data *sdata, const u8 *ra, u16 tid),
847
848 TP_ARGS(sdata, ra, tid),
849
850 TP_STRUCT__entry(
851 VIF_ENTRY
852 __array(u8, ra, ETH_ALEN)
853 __field(u16, tid)
854 ),
855
856 TP_fast_assign(
857 VIF_ASSIGN;
858 memcpy(__entry->ra, ra, ETH_ALEN);
859 __entry->tid = tid;
860 ),
861
862 TP_printk(
863 VIF_PR_FMT " ra:%pM tid:%d",
864 VIF_PR_ARG, __entry->ra, __entry->tid
865 )
866);
867
868TRACE_EVENT(api_restart_hw,
869 TP_PROTO(struct ieee80211_local *local),
870
871 TP_ARGS(local),
872
873 TP_STRUCT__entry(
874 LOCAL_ENTRY
875 ),
876
877 TP_fast_assign(
878 LOCAL_ASSIGN;
879 ),
880
881 TP_printk(
882 LOCAL_PR_FMT,
883 LOCAL_PR_ARG
884 )
885);
886
887TRACE_EVENT(api_beacon_loss,
888 TP_PROTO(struct ieee80211_sub_if_data *sdata),
889
890 TP_ARGS(sdata),
891
892 TP_STRUCT__entry(
893 VIF_ENTRY
894 ),
895
896 TP_fast_assign(
897 VIF_ASSIGN;
898 ),
899
900 TP_printk(
901 VIF_PR_FMT,
902 VIF_PR_ARG
903 )
904);
905
906TRACE_EVENT(api_connection_loss,
907 TP_PROTO(struct ieee80211_sub_if_data *sdata),
908
909 TP_ARGS(sdata),
910
911 TP_STRUCT__entry(
912 VIF_ENTRY
913 ),
914
915 TP_fast_assign(
916 VIF_ASSIGN;
917 ),
918
919 TP_printk(
920 VIF_PR_FMT,
921 VIF_PR_ARG
922 )
923);
924
925TRACE_EVENT(api_cqm_rssi_notify,
926 TP_PROTO(struct ieee80211_sub_if_data *sdata,
927 enum nl80211_cqm_rssi_threshold_event rssi_event),
928
929 TP_ARGS(sdata, rssi_event),
930
931 TP_STRUCT__entry(
932 VIF_ENTRY
933 __field(u32, rssi_event)
934 ),
935
936 TP_fast_assign(
937 VIF_ASSIGN;
938 __entry->rssi_event = rssi_event;
939 ),
940
941 TP_printk(
942 VIF_PR_FMT " event:%d",
943 VIF_PR_ARG, __entry->rssi_event
944 )
945);
946
947TRACE_EVENT(api_scan_completed,
948 TP_PROTO(struct ieee80211_local *local, bool aborted),
949
950 TP_ARGS(local, aborted),
951
952 TP_STRUCT__entry(
953 LOCAL_ENTRY
954 __field(bool, aborted)
955 ),
956
957 TP_fast_assign(
958 LOCAL_ASSIGN;
959 __entry->aborted = aborted;
960 ),
961
962 TP_printk(
963 LOCAL_PR_FMT " aborted:%d",
964 LOCAL_PR_ARG, __entry->aborted
965 )
966);
967
968TRACE_EVENT(api_sta_block_awake,
969 TP_PROTO(struct ieee80211_local *local,
970 struct ieee80211_sta *sta, bool block),
971
972 TP_ARGS(local, sta, block),
973
974 TP_STRUCT__entry(
975 LOCAL_ENTRY
976 STA_ENTRY
977 __field(bool, block)
978 ),
979
980 TP_fast_assign(
981 LOCAL_ASSIGN;
982 STA_ASSIGN;
983 __entry->block = block;
984 ),
985
986 TP_printk(
987 LOCAL_PR_FMT STA_PR_FMT " block:%d",
988 LOCAL_PR_ARG, STA_PR_ARG, __entry->block
989 )
990);
991
992/*
993 * Tracing for internal functions
994 * (which may also be called in response to driver calls)
995 */
996
997TRACE_EVENT(wake_queue,
998 TP_PROTO(struct ieee80211_local *local, u16 queue,
999 enum queue_stop_reason reason),
1000
1001 TP_ARGS(local, queue, reason),
1002
1003 TP_STRUCT__entry(
1004 LOCAL_ENTRY
1005 __field(u16, queue)
1006 __field(u32, reason)
1007 ),
1008
1009 TP_fast_assign(
1010 LOCAL_ASSIGN;
1011 __entry->queue = queue;
1012 __entry->reason = reason;
1013 ),
1014
1015 TP_printk(
1016 LOCAL_PR_FMT " queue:%d, reason:%d",
1017 LOCAL_PR_ARG, __entry->queue, __entry->reason
1018 )
1019);
1020
1021TRACE_EVENT(stop_queue,
1022 TP_PROTO(struct ieee80211_local *local, u16 queue,
1023 enum queue_stop_reason reason),
1024
1025 TP_ARGS(local, queue, reason),
1026
1027 TP_STRUCT__entry(
1028 LOCAL_ENTRY
1029 __field(u16, queue)
1030 __field(u32, reason)
1031 ),
1032
1033 TP_fast_assign(
1034 LOCAL_ASSIGN;
1035 __entry->queue = queue;
1036 __entry->reason = reason;
1037 ),
1038
1039 TP_printk(
1040 LOCAL_PR_FMT " queue:%d, reason:%d",
1041 LOCAL_PR_ARG, __entry->queue, __entry->reason
1042 )
1043);
769#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */ 1044#endif /* !__MAC80211_DRIVER_TRACE || TRACE_HEADER_MULTI_READ */
770 1045
771#undef TRACE_INCLUDE_PATH 1046#undef TRACE_INCLUDE_PATH
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index bb677a73b7c9..2ab106a0a491 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -175,8 +175,7 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
175#endif /* CONFIG_MAC80211_HT_DEBUG */ 175#endif /* CONFIG_MAC80211_HT_DEBUG */
176 176
177 if (initiator == WLAN_BACK_INITIATOR) 177 if (initiator == WLAN_BACK_INITIATOR)
178 ieee80211_sta_stop_rx_ba_session(sdata, sta->sta.addr, tid, 178 __ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_INITIATOR, 0);
179 WLAN_BACK_INITIATOR, 0);
180 else { /* WLAN_BACK_RECIPIENT */ 179 else { /* WLAN_BACK_RECIPIENT */
181 spin_lock_bh(&sta->lock); 180 spin_lock_bh(&sta->lock);
182 if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK) 181 if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index e2976da4e0d9..e6f3b0c7a71f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -265,17 +265,16 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
265 sta->sta.supp_rates[band] = supp_rates | 265 sta->sta.supp_rates[band] = supp_rates |
266 ieee80211_mandatory_rates(local, band); 266 ieee80211_mandatory_rates(local, band);
267 267
268 if (sta->sta.supp_rates[band] != prev_rates) {
268#ifdef CONFIG_MAC80211_IBSS_DEBUG 269#ifdef CONFIG_MAC80211_IBSS_DEBUG
269 if (sta->sta.supp_rates[band] != prev_rates)
270 printk(KERN_DEBUG "%s: updated supp_rates set " 270 printk(KERN_DEBUG "%s: updated supp_rates set "
271 "for %pM based on beacon info (0x%llx | " 271 "for %pM based on beacon/probe_response "
272 "0x%llx -> 0x%llx)\n", 272 "(0x%x -> 0x%x)\n",
273 sdata->name, 273 sdata->name, sta->sta.addr,
274 sta->sta.addr, 274 prev_rates, sta->sta.supp_rates[band]);
275 (unsigned long long) prev_rates,
276 (unsigned long long) supp_rates,
277 (unsigned long long) sta->sta.supp_rates[band]);
278#endif 275#endif
276 rate_control_rate_init(sta);
277 }
279 rcu_read_unlock(); 278 rcu_read_unlock();
280 } else { 279 } else {
281 rcu_read_unlock(); 280 rcu_read_unlock();
@@ -371,6 +370,7 @@ static void ieee80211_rx_bss_info(struct ieee80211_sub_if_data *sdata,
371 sdata->name, mgmt->bssid); 370 sdata->name, mgmt->bssid);
372#endif 371#endif
373 ieee80211_sta_join_ibss(sdata, bss); 372 ieee80211_sta_join_ibss(sdata, bss);
373 supp_rates = ieee80211_sta_get_rates(local, elems, band);
374 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa, 374 ieee80211_ibss_add_sta(sdata, mgmt->bssid, mgmt->sa,
375 supp_rates, GFP_KERNEL); 375 supp_rates, GFP_KERNEL);
376 } 376 }
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 241533e1bc03..c9712f35e596 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -317,6 +317,7 @@ enum ieee80211_sta_flags {
317 IEEE80211_STA_MFP_ENABLED = BIT(6), 317 IEEE80211_STA_MFP_ENABLED = BIT(6),
318 IEEE80211_STA_UAPSD_ENABLED = BIT(7), 318 IEEE80211_STA_UAPSD_ENABLED = BIT(7),
319 IEEE80211_STA_NULLFUNC_ACKED = BIT(8), 319 IEEE80211_STA_NULLFUNC_ACKED = BIT(8),
320 IEEE80211_STA_RESET_SIGNAL_AVE = BIT(9),
320}; 321};
321 322
322struct ieee80211_if_managed { 323struct ieee80211_if_managed {
@@ -327,7 +328,7 @@ struct ieee80211_if_managed {
327 struct work_struct work; 328 struct work_struct work;
328 struct work_struct monitor_work; 329 struct work_struct monitor_work;
329 struct work_struct chswitch_work; 330 struct work_struct chswitch_work;
330 struct work_struct beacon_loss_work; 331 struct work_struct beacon_connection_loss_work;
331 332
332 unsigned long probe_timeout; 333 unsigned long probe_timeout;
333 int probe_send_count; 334 int probe_send_count;
@@ -359,6 +360,24 @@ struct ieee80211_if_managed {
359 int wmm_last_param_set; 360 int wmm_last_param_set;
360 361
361 u8 use_4addr; 362 u8 use_4addr;
363
364 /* Signal strength from the last Beacon frame in the current BSS. */
365 int last_beacon_signal;
366
367 /*
368 * Weighted average of the signal strength from Beacon frames in the
369 * current BSS. This is in units of 1/16 of the signal unit to maintain
370 * accuracy and to speed up calculations, i.e., the value needs to be
371 * divided by 16 to get the actual value.
372 */
373 int ave_beacon_signal;
374
375 /*
376 * Last Beacon frame signal strength average (ave_beacon_signal / 16)
377 * that triggered a cqm event. 0 indicates that no event has been
378 * generated for the current association.
379 */
380 int last_cqm_event_signal;
362}; 381};
363 382
364enum ieee80211_ibss_request { 383enum ieee80211_ibss_request {
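
Keeping the running average in 1/16 units keeps the smoothing in integer
arithmetic while preserving fractional precision; the DEC_DIV_16 debugfs
format added earlier divides it back down for display. A standalone sketch
of a weighted update in those units (the weight, the seeding rule and the
helper name are illustrative, not the exact mlme code):

	#define SIG_AVE_WEIGHT	3	/* weight of a new sample, out of 16 */

	static void ave_beacon_update(int *ave16, int signal)
	{
		if (!*ave16)		/* first beacon: seed the average */
			*ave16 = signal * 16;
		else
			*ave16 = (SIG_AVE_WEIGHT * signal * 16 +
				  (16 - SIG_AVE_WEIGHT) * *ave16) / 16;
	}

	/* value for reporting, e.g. in debugfs: *ave16 / 16 */
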
@@ -646,8 +665,7 @@ struct ieee80211_local {
646 struct work_struct recalc_smps; 665 struct work_struct recalc_smps;
647 666
648 /* aggregated multicast list */ 667 /* aggregated multicast list */
649 struct dev_addr_list *mc_list; 668 struct netdev_hw_addr_list mc_list;
650 int mc_count;
651 669
652 bool tim_in_locked_section; /* see ieee80211_beacon_get() */ 670 bool tim_in_locked_section; /* see ieee80211_beacon_get() */
653 671
@@ -745,6 +763,7 @@ struct ieee80211_local {
745 int scan_channel_idx; 763 int scan_channel_idx;
746 int scan_ies_len; 764 int scan_ies_len;
747 765
766 unsigned long leave_oper_channel_time;
748 enum mac80211_scan_state next_scan_state; 767 enum mac80211_scan_state next_scan_state;
749 struct delayed_work scan_work; 768 struct delayed_work scan_work;
750 struct ieee80211_sub_if_data *scan_sdata; 769 struct ieee80211_sub_if_data *scan_sdata;
@@ -1078,8 +1097,6 @@ int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
1078 enum ieee80211_smps_mode smps, const u8 *da, 1097 enum ieee80211_smps_mode smps, const u8 *da,
1079 const u8 *bssid); 1098 const u8 *bssid);
1080 1099
1081void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *da,
1082 u16 tid, u16 initiator, u16 reason);
1083void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid, 1100void __ieee80211_stop_rx_ba_session(struct sta_info *sta, u16 tid,
1084 u16 initiator, u16 reason); 1101 u16 initiator, u16 reason);
1085void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta); 1102void ieee80211_sta_tear_down_BA_sessions(struct sta_info *sta);
@@ -1155,7 +1172,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
1155 int powersave); 1172 int powersave);
1156void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, 1173void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
1157 struct ieee80211_hdr *hdr); 1174 struct ieee80211_hdr *hdr);
1158void ieee80211_beacon_loss_work(struct work_struct *work); 1175void ieee80211_beacon_connection_loss_work(struct work_struct *work);
1159 1176
1160void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw, 1177void ieee80211_wake_queues_by_reason(struct ieee80211_hw *hw,
1161 enum queue_stop_reason reason); 1178 enum queue_stop_reason reason);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index e08fa8eda1b3..50deb017fd6e 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -413,8 +413,7 @@ static int ieee80211_stop(struct net_device *dev)
413 413
414 netif_addr_lock_bh(dev); 414 netif_addr_lock_bh(dev);
415 spin_lock_bh(&local->filter_lock); 415 spin_lock_bh(&local->filter_lock);
416 __dev_addr_unsync(&local->mc_list, &local->mc_count, 416 __hw_addr_unsync(&local->mc_list, &dev->mc, dev->addr_len);
417 &dev->mc_list, &dev->mc_count);
418 spin_unlock_bh(&local->filter_lock); 417 spin_unlock_bh(&local->filter_lock);
419 netif_addr_unlock_bh(dev); 418 netif_addr_unlock_bh(dev);
420 419
@@ -487,7 +486,7 @@ static int ieee80211_stop(struct net_device *dev)
487 cancel_work_sync(&sdata->u.mgd.work); 486 cancel_work_sync(&sdata->u.mgd.work);
488 cancel_work_sync(&sdata->u.mgd.chswitch_work); 487 cancel_work_sync(&sdata->u.mgd.chswitch_work);
489 cancel_work_sync(&sdata->u.mgd.monitor_work); 488 cancel_work_sync(&sdata->u.mgd.monitor_work);
490 cancel_work_sync(&sdata->u.mgd.beacon_loss_work); 489 cancel_work_sync(&sdata->u.mgd.beacon_connection_loss_work);
491 490
492 /* 491 /*
493 * When we get here, the interface is marked down. 492 * When we get here, the interface is marked down.
@@ -597,8 +596,7 @@ static void ieee80211_set_multicast_list(struct net_device *dev)
597 sdata->flags ^= IEEE80211_SDATA_PROMISC; 596 sdata->flags ^= IEEE80211_SDATA_PROMISC;
598 } 597 }
599 spin_lock_bh(&local->filter_lock); 598 spin_lock_bh(&local->filter_lock);
600 __dev_addr_sync(&local->mc_list, &local->mc_count, 599 __hw_addr_sync(&local->mc_list, &dev->mc, dev->addr_len);
601 &dev->mc_list, &dev->mc_count);
602 spin_unlock_bh(&local->filter_lock); 600 spin_unlock_bh(&local->filter_lock);
603 ieee80211_queue_work(&local->hw, &local->reconfig_filter); 601 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
604} 602}
@@ -816,6 +814,118 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,
816 return 0; 814 return 0;
817} 815}
818 816
817static void ieee80211_assign_perm_addr(struct ieee80211_local *local,
818 struct net_device *dev,
819 enum nl80211_iftype type)
820{
821 struct ieee80211_sub_if_data *sdata;
822 u64 mask, start, addr, val, inc;
823 u8 *m;
824 u8 tmp_addr[ETH_ALEN];
825 int i;
826
827 /* default ... something at least */
828 memcpy(dev->perm_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
829
830 if (is_zero_ether_addr(local->hw.wiphy->addr_mask) &&
831 local->hw.wiphy->n_addresses <= 1)
832 return;
833
834
835 mutex_lock(&local->iflist_mtx);
836
837 switch (type) {
838 case NL80211_IFTYPE_MONITOR:
839 /* doesn't matter */
840 break;
841 case NL80211_IFTYPE_WDS:
842 case NL80211_IFTYPE_AP_VLAN:
843 /* match up with an AP interface */
844 list_for_each_entry(sdata, &local->interfaces, list) {
845 if (sdata->vif.type != NL80211_IFTYPE_AP)
846 continue;
847 memcpy(dev->perm_addr, sdata->vif.addr, ETH_ALEN);
848 break;
849 }
850 /* keep default if no AP interface present */
851 break;
852 default:
853 /* assign a new address if possible -- try n_addresses first */
854 for (i = 0; i < local->hw.wiphy->n_addresses; i++) {
855 bool used = false;
856
857 list_for_each_entry(sdata, &local->interfaces, list) {
858 if (memcmp(local->hw.wiphy->addresses[i].addr,
859 sdata->vif.addr, ETH_ALEN) == 0) {
860 used = true;
861 break;
862 }
863 }
864
865 if (!used) {
866 memcpy(dev->perm_addr,
867 local->hw.wiphy->addresses[i].addr,
868 ETH_ALEN);
869 break;
870 }
871 }
872
873 /* try mask if available */
874 if (is_zero_ether_addr(local->hw.wiphy->addr_mask))
875 break;
876
877 m = local->hw.wiphy->addr_mask;
878 mask = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
879 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
880 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
881
882 if (__ffs64(mask) + hweight64(mask) != fls64(mask)) {
883 /* not a contiguous mask ... not handled now! */
884 printk(KERN_DEBUG "not contiguous\n");
885 break;
886 }
887
888 m = local->hw.wiphy->perm_addr;
889 start = ((u64)m[0] << 5*8) | ((u64)m[1] << 4*8) |
890 ((u64)m[2] << 3*8) | ((u64)m[3] << 2*8) |
891 ((u64)m[4] << 1*8) | ((u64)m[5] << 0*8);
892
893 inc = 1ULL<<__ffs64(mask);
894 val = (start & mask);
895 addr = (start & ~mask) | (val & mask);
896 do {
897 bool used = false;
898
899 tmp_addr[5] = addr >> 0*8;
900 tmp_addr[4] = addr >> 1*8;
901 tmp_addr[3] = addr >> 2*8;
902 tmp_addr[2] = addr >> 3*8;
903 tmp_addr[1] = addr >> 4*8;
904 tmp_addr[0] = addr >> 5*8;
905
906 val += inc;
907
908 list_for_each_entry(sdata, &local->interfaces, list) {
909 if (memcmp(tmp_addr, sdata->vif.addr,
910 ETH_ALEN) == 0) {
911 used = true;
912 break;
913 }
914 }
915
916 if (!used) {
917 memcpy(dev->perm_addr, tmp_addr, ETH_ALEN);
918 break;
919 }
920 addr = (start & ~mask) | (val & mask);
921 } while (addr != start);
922
923 break;
924 }
925
926 mutex_unlock(&local->iflist_mtx);
927}
928
819int ieee80211_if_add(struct ieee80211_local *local, const char *name, 929int ieee80211_if_add(struct ieee80211_local *local, const char *name,
820 struct net_device **new_dev, enum nl80211_iftype type, 930 struct net_device **new_dev, enum nl80211_iftype type,
821 struct vif_params *params) 931 struct vif_params *params)
@@ -845,8 +955,8 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
 	if (ret < 0)
 		goto fail;
 
-	memcpy(ndev->dev_addr, local->hw.wiphy->perm_addr, ETH_ALEN);
-	memcpy(ndev->perm_addr, ndev->dev_addr, ETH_ALEN);
+	ieee80211_assign_perm_addr(local, ndev, type);
+	memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN);
 	SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy));
 
 	/* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */
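
A note on the new address-derivation scheme: ieee80211_assign_perm_addr() only auto-derives extra interface addresses when wiphy->addr_mask contains a single contiguous run of set bits -- __ffs64() yields the lowest set bit, fls64() one past the highest, so the run is contiguous exactly when lowest + hweight == fls. The sketch below reproduces that arithmetic in plain user-space C; the helper names and the GCC builtins standing in for the kernel bitops are illustrative only, not mac80211 API.

	#include <stdio.h>
	#include <stdint.h>
	#include <stdbool.h>

	static uint64_t mac_to_u64(const uint8_t *m)
	{
		uint64_t v = 0;
		int i;

		for (i = 0; i < 6; i++)
			v = (v << 8) | m[i];
		return v;
	}

	static void u64_to_mac(uint64_t v, uint8_t *m)
	{
		int i;

		for (i = 5; i >= 0; i--) {
			m[i] = v & 0xff;
			v >>= 8;
		}
	}

	/* usable masks have one contiguous run of set bits */
	static bool mask_contiguous(uint64_t mask)
	{
		int lowest = __builtin_ctzll(mask);
		int highest = 64 - __builtin_clzll(mask);

		return lowest + __builtin_popcountll(mask) == highest;
	}

	int main(void)
	{
		const uint8_t perm[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 };
		const uint8_t mask_bytes[6] = { 0, 0, 0, 0, 0, 0x0f };
		uint64_t start = mac_to_u64(perm);
		uint64_t mask = mac_to_u64(mask_bytes);
		uint64_t inc, val, addr;
		uint8_t out[6];

		if (!mask || !mask_contiguous(mask))
			return 1;

		inc = 1ULL << __builtin_ctzll(mask);
		val = start & mask;
		addr = (start & ~mask) | (val & mask);
		do {
			u64_to_mac(addr, out);
			printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
			       out[0], out[1], out[2], out[3], out[4], out[5]);
			val += inc;
			addr = (start & ~mask) | (val & mask);
		} while (addr != start);

		return 0;
	}

With a 4-bit mask in the last octet this prints the 16 candidate addresses the kernel loop would walk through while searching for an unused one.
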
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index b887e484ae04..011ee85bcd57 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -71,7 +71,7 @@ void ieee80211_configure_filter(struct ieee80211_local *local)
 	spin_lock_bh(&local->filter_lock);
 	changed_flags = local->filter_flags ^ new_flags;
 
-	mc = drv_prepare_multicast(local, local->mc_count, local->mc_list);
+	mc = drv_prepare_multicast(local, &local->mc_list);
 	spin_unlock_bh(&local->filter_lock);
 
 	/* be a bit nasty */
@@ -309,6 +309,8 @@ void ieee80211_restart_hw(struct ieee80211_hw *hw)
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
+	trace_api_restart_hw(local);
+
 	/* use this reason, __ieee80211_resume will unblock it */
 	ieee80211_stop_queues_by_reason(hw,
 		IEEE80211_QUEUE_STOP_REASON_SUSPEND);
@@ -388,6 +390,9 @@ struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
 	local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;
 
 	INIT_LIST_HEAD(&local->interfaces);
+
+	__hw_addr_init(&local->mc_list);
+
 	mutex_init(&local->iflist_mtx);
 	mutex_init(&local->scan_mtx);
 
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 859ee5f3d941..7e93524459fc 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -601,10 +601,10 @@ static void ieee80211_mesh_rx_mgmt_action(struct ieee80211_sub_if_data *sdata,
 				   struct ieee80211_rx_status *rx_status)
 {
 	switch (mgmt->u.action.category) {
-	case MESH_PLINK_CATEGORY:
+	case WLAN_CATEGORY_MESH_PLINK:
 		mesh_rx_plink_frame(sdata, mgmt, len, rx_status);
 		break;
-	case MESH_PATH_SEL_CATEGORY:
+	case WLAN_CATEGORY_MESH_PATH_SEL:
 		mesh_rx_path_sel_frame(sdata, mgmt, len);
 		break;
 	}
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 85562c59d7d6..c88087f1cd0f 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -209,8 +209,6 @@ struct mesh_rmc {
 #define MESH_MAX_MPATHS	1024
 
 /* Pending ANA approval */
-#define MESH_PLINK_CATEGORY	30
-#define MESH_PATH_SEL_CATEGORY	32
 #define MESH_PATH_SEL_ACTION	0
 
 /* PERR reason codes */
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index fefc45c4b4e8..d89ed7f2592b 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -132,7 +132,7 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
 	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 	/* BSSID == SA */
 	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
-	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
+	mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
 	mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
 
 	switch (action) {
@@ -225,7 +225,7 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
 	memcpy(mgmt->da, ra, ETH_ALEN);
 	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 	/* BSSID is left zeroed, wildcard value */
-	mgmt->u.action.category = MESH_PATH_SEL_CATEGORY;
+	mgmt->u.action.category = WLAN_CATEGORY_MESH_PATH_SEL;
 	mgmt->u.action.u.mesh_action.action_code = MESH_PATH_SEL_ACTION;
 	ie_len = 15;
 	pos = skb_put(skb, 2 + ie_len);
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 7b7080e2b49f..3cd5f7b5d693 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -172,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
 	memcpy(mgmt->da, da, ETH_ALEN);
 	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
 	/* BSSID is left zeroed, wildcard value */
-	mgmt->u.action.category = MESH_PLINK_CATEGORY;
+	mgmt->u.action.category = WLAN_CATEGORY_MESH_PLINK;
 	mgmt->u.action.u.plink_action.action_code = action;
 
 	if (action == PLINK_CLOSE)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 4aefa6dc3091..3133681bdaa0 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -47,6 +47,13 @@
  */
 #define IEEE80211_PROBE_WAIT	(HZ / 2)
 
+/*
+ * Weight given to the latest Beacon frame when calculating average signal
+ * strength for Beacon frames received in the current BSS. This must be
+ * between 1 and 15.
+ */
+#define IEEE80211_SIGNAL_AVE_WEIGHT	3
+
 #define TMR_RUNNING_TIMER	0
 #define TMR_RUNNING_CHANSW	1
 
@@ -206,7 +213,7 @@ static u32 ieee80211_enable_ht(struct ieee80211_sub_if_data *sdata,
 
 static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
 					   const u8 *bssid, u16 stype, u16 reason,
-					   void *cookie)
+					   void *cookie, bool send_frame)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -243,7 +250,11 @@ static void ieee80211_send_deauth_disassoc(struct ieee80211_sub_if_data *sdata,
 		cfg80211_send_disassoc(sdata->dev, (u8 *)mgmt, skb->len);
 	if (!(ifmgd->flags & IEEE80211_STA_MFP_ENABLED))
 		IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
-	ieee80211_tx_skb(sdata, skb);
+
+	if (send_frame)
+		ieee80211_tx_skb(sdata, skb);
+	else
+		kfree_skb(skb);
 }
 
 void ieee80211_send_pspoll(struct ieee80211_local *local,
@@ -592,6 +603,9 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
 	int count;
 	u8 *pos, uapsd_queues = 0;
 
+	if (!local->ops->conf_tx)
+		return;
+
 	if (local->hw.queues < 4)
 		return;
 
@@ -666,11 +680,15 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
 			params.aifs, params.cw_min, params.cw_max, params.txop,
 			params.uapsd);
 #endif
-		if (drv_conf_tx(local, queue, &params) && local->ops->conf_tx)
+		if (drv_conf_tx(local, queue, &params))
 			printk(KERN_DEBUG "%s: failed to set TX queue "
 			       "parameters for queue %d\n",
 			       wiphy_name(local->hw.wiphy), queue);
 	}
+
+	/* enable WMM or activate new settings */
+	local->hw.conf.flags |= IEEE80211_CONF_QOS;
+	drv_config(local, IEEE80211_CONF_CHANGE_QOS);
 }
 
 static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
@@ -731,6 +749,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 	sdata->u.mgd.associated = cbss;
 	memcpy(sdata->u.mgd.bssid, cbss->bssid, ETH_ALEN);
 
+	sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
+
 	/* just to be sure */
 	sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
 				IEEE80211_STA_BEACON_POLL);
@@ -756,6 +776,11 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 	/* And the BSSID changed - we're associated now */
 	bss_info_changed |= BSS_CHANGED_BSSID;
 
+	/* Tell the driver to monitor connection quality (if supported) */
+	if ((local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI) &&
+	    sdata->vif.bss_conf.cqm_rssi_thold)
+		bss_info_changed |= BSS_CHANGED_CQM;
+
 	ieee80211_bss_info_change_notify(sdata, bss_info_changed);
 
 	mutex_lock(&local->iflist_mtx);
@@ -767,7 +792,8 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
 	netif_carrier_on(sdata->dev);
 }
 
-static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
+static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
+				   bool remove_sta)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_local *local = sdata->local;
@@ -840,7 +866,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata)
 	changed |= BSS_CHANGED_BSSID;
 	ieee80211_bss_info_change_notify(sdata, changed);
 
-	sta_info_destroy_addr(sdata, bssid);
+	if (remove_sta)
+		sta_info_destroy_addr(sdata, bssid);
 }
 
 void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
@@ -857,6 +884,9 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata,
 	if (is_multicast_ether_addr(hdr->addr1))
 		return;
 
+	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+		return;
+
 	mod_timer(&sdata->u.mgd.conn_mon_timer,
 		  round_jiffies_up(jiffies + IEEE80211_CONNECTION_IDLE_TIME));
 }
@@ -934,23 +964,72 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
 	mutex_unlock(&ifmgd->mtx);
 }
 
-void ieee80211_beacon_loss_work(struct work_struct *work)
+static void __ieee80211_connection_loss(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	struct ieee80211_local *local = sdata->local;
+	u8 bssid[ETH_ALEN];
+
+	mutex_lock(&ifmgd->mtx);
+	if (!ifmgd->associated) {
+		mutex_unlock(&ifmgd->mtx);
+		return;
+	}
+
+	memcpy(bssid, ifmgd->associated->bssid, ETH_ALEN);
+
+	printk(KERN_DEBUG "Connection to AP %pM lost.\n", bssid);
+
+	ieee80211_set_disassoc(sdata, true);
+	ieee80211_recalc_idle(local);
+	mutex_unlock(&ifmgd->mtx);
+	/*
+	 * must be outside lock due to cfg80211,
+	 * but that's not a problem.
+	 */
+	ieee80211_send_deauth_disassoc(sdata, bssid,
+				       IEEE80211_STYPE_DEAUTH,
+				       WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
+				       NULL, true);
+}
+
+void ieee80211_beacon_connection_loss_work(struct work_struct *work)
 {
 	struct ieee80211_sub_if_data *sdata =
 		container_of(work, struct ieee80211_sub_if_data,
-			     u.mgd.beacon_loss_work);
+			     u.mgd.beacon_connection_loss_work);
 
-	ieee80211_mgd_probe_ap(sdata, true);
+	if (sdata->local->hw.flags & IEEE80211_HW_CONNECTION_MONITOR)
+		__ieee80211_connection_loss(sdata);
+	else
+		ieee80211_mgd_probe_ap(sdata, true);
 }
 
 void ieee80211_beacon_loss(struct ieee80211_vif *vif)
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_hw *hw = &sdata->local->hw;
+
+	trace_api_beacon_loss(sdata);
 
-	ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work);
+	WARN_ON(hw->flags & IEEE80211_HW_CONNECTION_MONITOR);
+	ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
 }
 EXPORT_SYMBOL(ieee80211_beacon_loss);
 
+void ieee80211_connection_loss(struct ieee80211_vif *vif)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	struct ieee80211_hw *hw = &sdata->local->hw;
+
+	trace_api_connection_loss(sdata);
+
+	WARN_ON(!(hw->flags & IEEE80211_HW_CONNECTION_MONITOR));
+	ieee80211_queue_work(hw, &sdata->u.mgd.beacon_connection_loss_work);
+}
+EXPORT_SYMBOL(ieee80211_connection_loss);
+
+
 static enum rx_mgmt_action __must_check
 ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
 			 struct ieee80211_mgmt *mgmt, size_t len)
@@ -971,7 +1050,7 @@ ieee80211_rx_mgmt_deauth(struct ieee80211_sub_if_data *sdata,
 	printk(KERN_DEBUG "%s: deauthenticated from %pM (Reason: %u)\n",
 			sdata->name, bssid, reason_code);
 
-	ieee80211_set_disassoc(sdata);
+	ieee80211_set_disassoc(sdata, true);
 	ieee80211_recalc_idle(sdata->local);
 
 	return RX_MGMT_CFG80211_DEAUTH;
@@ -1001,7 +1080,7 @@ ieee80211_rx_mgmt_disassoc(struct ieee80211_sub_if_data *sdata,
 	printk(KERN_DEBUG "%s: disassociated from %pM (Reason: %u)\n",
 			sdata->name, mgmt->sa, reason_code);
 
-	ieee80211_set_disassoc(sdata);
+	ieee80211_set_disassoc(sdata, true);
 	ieee80211_recalc_idle(sdata->local);
 	return RX_MGMT_CFG80211_DISASSOC;
 }
@@ -1293,6 +1372,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 				     struct ieee80211_rx_status *rx_status)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
 	size_t baselen;
 	struct ieee802_11_elems elems;
 	struct ieee80211_local *local = sdata->local;
@@ -1328,6 +1408,41 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
 	if (memcmp(bssid, mgmt->bssid, ETH_ALEN) != 0)
 		return;
 
+	/* Track average RSSI from the Beacon frames of the current AP */
+	ifmgd->last_beacon_signal = rx_status->signal;
+	if (ifmgd->flags & IEEE80211_STA_RESET_SIGNAL_AVE) {
+		ifmgd->flags &= ~IEEE80211_STA_RESET_SIGNAL_AVE;
+		ifmgd->ave_beacon_signal = rx_status->signal * 16;
+		ifmgd->last_cqm_event_signal = 0;
+	} else {
+		ifmgd->ave_beacon_signal =
+			(IEEE80211_SIGNAL_AVE_WEIGHT * rx_status->signal * 16 +
+			 (16 - IEEE80211_SIGNAL_AVE_WEIGHT) *
+			 ifmgd->ave_beacon_signal) / 16;
+	}
+	if (bss_conf->cqm_rssi_thold &&
+	    !(local->hw.flags & IEEE80211_HW_SUPPORTS_CQM_RSSI)) {
+		int sig = ifmgd->ave_beacon_signal / 16;
+		int last_event = ifmgd->last_cqm_event_signal;
+		int thold = bss_conf->cqm_rssi_thold;
+		int hyst = bss_conf->cqm_rssi_hyst;
+		if (sig < thold &&
+		    (last_event == 0 || sig < last_event - hyst)) {
+			ifmgd->last_cqm_event_signal = sig;
+			ieee80211_cqm_rssi_notify(
+				&sdata->vif,
+				NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
+				GFP_KERNEL);
+		} else if (sig > thold &&
+			   (last_event == 0 || sig > last_event + hyst)) {
+			ifmgd->last_cqm_event_signal = sig;
+			ieee80211_cqm_rssi_notify(
+				&sdata->vif,
+				NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
+				GFP_KERNEL);
+		}
+	}
+
 	if (ifmgd->flags & IEEE80211_STA_BEACON_POLL) {
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
 		if (net_ratelimit()) {
@@ -1613,7 +1728,7 @@ static void ieee80211_sta_work(struct work_struct *work)
 		printk(KERN_DEBUG "No probe response from AP %pM"
 			" after %dms, disconnecting.\n",
 			bssid, (1000 * IEEE80211_PROBE_WAIT)/HZ);
-		ieee80211_set_disassoc(sdata);
+		ieee80211_set_disassoc(sdata, true);
 		ieee80211_recalc_idle(local);
 		mutex_unlock(&ifmgd->mtx);
 		/*
@@ -1623,7 +1738,7 @@ static void ieee80211_sta_work(struct work_struct *work)
 		ieee80211_send_deauth_disassoc(sdata, bssid,
 				IEEE80211_STYPE_DEAUTH,
 				WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
-				NULL);
+				NULL, true);
 		mutex_lock(&ifmgd->mtx);
 	}
 }
@@ -1640,7 +1755,8 @@ static void ieee80211_sta_bcn_mon_timer(unsigned long data)
 	if (local->quiescing)
 		return;
 
-	ieee80211_queue_work(&sdata->local->hw, &sdata->u.mgd.beacon_loss_work);
+	ieee80211_queue_work(&sdata->local->hw,
+			     &sdata->u.mgd.beacon_connection_loss_work);
 }
 
 static void ieee80211_sta_conn_mon_timer(unsigned long data)
@@ -1692,7 +1808,7 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)
 	 */
 
 	cancel_work_sync(&ifmgd->work);
-	cancel_work_sync(&ifmgd->beacon_loss_work);
+	cancel_work_sync(&ifmgd->beacon_connection_loss_work);
 	if (del_timer_sync(&ifmgd->timer))
 		set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running);
 
@@ -1726,7 +1842,8 @@ void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
 	INIT_WORK(&ifmgd->work, ieee80211_sta_work);
 	INIT_WORK(&ifmgd->monitor_work, ieee80211_sta_monitor_work);
 	INIT_WORK(&ifmgd->chswitch_work, ieee80211_chswitch_work);
-	INIT_WORK(&ifmgd->beacon_loss_work, ieee80211_beacon_loss_work);
+	INIT_WORK(&ifmgd->beacon_connection_loss_work,
+		  ieee80211_beacon_connection_loss_work);
 	setup_timer(&ifmgd->timer, ieee80211_sta_timer,
 		    (unsigned long) sdata);
 	setup_timer(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer,
@@ -1805,6 +1922,9 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
 	struct ieee80211_work *wk;
 	u16 auth_alg;
 
+	if (req->local_state_change)
+		return 0; /* no need to update mac80211 state */
+
 	switch (req->auth_type) {
 	case NL80211_AUTHTYPE_OPEN_SYSTEM:
 		auth_alg = WLAN_AUTH_OPEN;
@@ -1913,7 +2033,7 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,
 		}
 
 		/* Trying to reassociate - clear previous association state */
-		ieee80211_set_disassoc(sdata);
+		ieee80211_set_disassoc(sdata, true);
 	}
 	mutex_unlock(&ifmgd->mtx);
 
@@ -2017,7 +2137,7 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 
 	if (ifmgd->associated == req->bss) {
 		bssid = req->bss->bssid;
-		ieee80211_set_disassoc(sdata);
+		ieee80211_set_disassoc(sdata, true);
 		mutex_unlock(&ifmgd->mtx);
 	} else {
 		bool not_auth_yet = false;
@@ -2060,9 +2180,9 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 	printk(KERN_DEBUG "%s: deauthenticating from %pM by local choice (reason=%d)\n",
 			sdata->name, bssid, req->reason_code);
 
-	ieee80211_send_deauth_disassoc(sdata, bssid,
-			IEEE80211_STYPE_DEAUTH, req->reason_code,
-			cookie);
+	ieee80211_send_deauth_disassoc(sdata, bssid, IEEE80211_STYPE_DEAUTH,
+				       req->reason_code, cookie,
+				       !req->local_state_change);
 
 	ieee80211_recalc_idle(sdata->local);
 
@@ -2074,6 +2194,7 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 			   void *cookie)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	u8 bssid[ETH_ALEN];
 
 	mutex_lock(&ifmgd->mtx);
 
@@ -2091,13 +2212,15 @@ int ieee80211_mgd_disassoc(struct ieee80211_sub_if_data *sdata,
 	printk(KERN_DEBUG "%s: disassociating from %pM by local choice (reason=%d)\n",
 			sdata->name, req->bss->bssid, req->reason_code);
 
-	ieee80211_set_disassoc(sdata);
+	memcpy(bssid, req->bss->bssid, ETH_ALEN);
+	ieee80211_set_disassoc(sdata, false);
 
 	mutex_unlock(&ifmgd->mtx);
 
 	ieee80211_send_deauth_disassoc(sdata, req->bss->bssid,
 			IEEE80211_STYPE_DISASSOC, req->reason_code,
-			cookie);
+			cookie, !req->local_state_change);
+	sta_info_destroy_addr(sdata, bssid);
 
 	ieee80211_recalc_idle(sdata->local);
 
@@ -2138,3 +2261,15 @@ int ieee80211_mgd_action(struct ieee80211_sub_if_data *sdata,
 	*cookie = (unsigned long) skb;
 	return 0;
 }
+
+void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif,
+			       enum nl80211_cqm_rssi_threshold_event rssi_event,
+			       gfp_t gfp)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+	trace_api_cqm_rssi_notify(sdata, rssi_event);
+
+	cfg80211_cqm_rssi_notify(sdata->dev, rssi_event, gfp);
+}
+EXPORT_SYMBOL(ieee80211_cqm_rssi_notify);
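
Two details of the beacon-tracking hunk above are easy to miss: ave_beacon_signal is kept scaled by 16 so the weighted average stays in integer arithmetic, and the CQM comparison requires the average to move past the last reported level by cqm_rssi_hyst before another event fires. A self-contained sketch of that math with made-up sample values (an illustration only, not kernel code):

	#include <stdio.h>

	#define SIGNAL_AVE_WEIGHT 3 /* must be between 1 and 15 */

	int main(void)
	{
		int samples[] = { -55, -60, -85, -90, -92, -60, -50 };
		int thold = -70, hyst = 4; /* dBm threshold and hysteresis */
		int ave = 0, last_event = 0, first = 1;
		unsigned int i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
			int sig;

			if (first) {
				/* fresh association: seed the scaled average */
				ave = samples[i] * 16;
				first = 0;
			} else {
				ave = (SIGNAL_AVE_WEIGHT * samples[i] * 16 +
				       (16 - SIGNAL_AVE_WEIGHT) * ave) / 16;
			}

			sig = ave / 16;
			if (sig < thold &&
			    (last_event == 0 || sig < last_event - hyst)) {
				last_event = sig;
				printf("beacon %u: ave %d dBm -> LOW event\n",
				       i, sig);
			} else if (sig > thold &&
				   (last_event == 0 || sig > last_event + hyst)) {
				last_event = sig;
				printf("beacon %u: ave %d dBm -> HIGH event\n",
				       i, sig);
			}
		}
		return 0;
	}

The run reports a HIGH event on the first beacon, a LOW event once the average sinks past -70 dBm, and another HIGH event only after the average climbs back out of the hysteresis window.
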
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 0e64484e861c..75202b295a4e 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -46,7 +46,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
 
 	if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
 		list_for_each_entry_rcu(sta, &local->sta_list, list) {
-			set_sta_flags(sta, WLAN_STA_SUSPEND);
+			set_sta_flags(sta, WLAN_STA_BLOCK_BA);
 			ieee80211_sta_tear_down_BA_sessions(sta);
 		}
 	}
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 818abfae9007..f65ce6dcc8e2 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -542,7 +542,7 @@ minstrel_free(void *priv)
 	kfree(priv);
 }
 
-static struct rate_control_ops mac80211_minstrel = {
+struct rate_control_ops mac80211_minstrel = {
 	.name = "minstrel",
 	.tx_status = minstrel_tx_status,
 	.get_rate = minstrel_get_rate,
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index 38bf4168fc3a..0f5a83370aa6 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -80,7 +80,18 @@ struct minstrel_priv {
 	unsigned int lookaround_rate_mrr;
 };
 
+struct minstrel_debugfs_info {
+	size_t len;
+	char buf[];
+};
+
+extern struct rate_control_ops mac80211_minstrel;
 void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
 void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
 
+/* debugfs */
+int minstrel_stats_open(struct inode *inode, struct file *file);
+ssize_t minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos);
+int minstrel_stats_release(struct inode *inode, struct file *file);
+
 #endif
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 0e1f12b1b6dd..241e76f3fdf2 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -53,21 +53,15 @@
 #include <net/mac80211.h>
 #include "rc80211_minstrel.h"
 
-struct minstrel_stats_info {
-	struct minstrel_sta_info *mi;
-	char buf[4096];
-	size_t len;
-};
-
-static int
+int
 minstrel_stats_open(struct inode *inode, struct file *file)
 {
 	struct minstrel_sta_info *mi = inode->i_private;
-	struct minstrel_stats_info *ms;
+	struct minstrel_debugfs_info *ms;
 	unsigned int i, tp, prob, eprob;
 	char *p;
 
-	ms = kmalloc(sizeof(*ms), GFP_KERNEL);
+	ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL);
 	if (!ms)
 		return -ENOMEM;
 
@@ -107,36 +101,19 @@ minstrel_stats_open(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static ssize_t
-minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *o)
+ssize_t
+minstrel_stats_read(struct file *file, char __user *buf, size_t len, loff_t *ppos)
 {
-	struct minstrel_stats_info *ms;
-	char *src;
+	struct minstrel_debugfs_info *ms;
 
 	ms = file->private_data;
-	src = ms->buf;
-
-	len = min(len, ms->len);
-	if (len <= *o)
-		return 0;
-
-	src += *o;
-	len -= *o;
-	*o += len;
-
-	if (copy_to_user(buf, src, len))
-		return -EFAULT;
-
-	return len;
+	return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
 }
 
-static int
+int
 minstrel_stats_release(struct inode *inode, struct file *file)
 {
-	struct minstrel_stats_info *ms = file->private_data;
-
-	kfree(ms);
-
+	kfree(file->private_data);
 	return 0;
 }
 
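
The pattern the minstrel debugfs rework converges on is worth spelling out: one allocation that carries the length plus a flexible-array text buffer, drained by simple_read_from_buffer(), which replaces the hand-rolled *ppos/copy_to_user() bookkeeping deleted above. An uncompiled, kernel-style sketch of that shape; the my_stats_* names are placeholders, not minstrel symbols:

	#include <linux/fs.h>
	#include <linux/slab.h>
	#include <linux/kernel.h>
	#include <linux/module.h>

	struct my_debugfs_info {
		size_t len;
		char buf[]; /* flexible array, sized at allocation time */
	};

	static int my_stats_open(struct inode *inode, struct file *file)
	{
		struct my_debugfs_info *ms;

		ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL);
		if (!ms)
			return -ENOMEM;

		ms->len = scnprintf(ms->buf, 4096, "example stats\n");
		file->private_data = ms;
		return 0;
	}

	static ssize_t my_stats_read(struct file *file, char __user *buf,
				     size_t len, loff_t *ppos)
	{
		struct my_debugfs_info *ms = file->private_data;

		/* handles offset, short reads and copy_to_user() */
		return simple_read_from_buffer(buf, len, ppos, ms->buf, ms->len);
	}

	static int my_stats_release(struct inode *inode, struct file *file)
	{
		kfree(file->private_data);
		return 0;
	}

	static const struct file_operations my_stats_fops = {
		.owner = THIS_MODULE,
		.open = my_stats_open,
		.read = my_stats_read,
		.release = my_stats_release,
	};

Exporting the open/read/release helpers from one file, as the hunks above do, lets minstrel variants share the same fops plumbing instead of each carrying a private copy.
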
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 04ea07f0e78a..72efbd87c1eb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -39,7 +39,7 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
 {
 	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
 		if (likely(skb->len > FCS_LEN))
-			skb_trim(skb, skb->len - FCS_LEN);
+			__pskb_trim(skb, skb->len - FCS_LEN);
 		else {
 			/* driver bug */
 			WARN_ON(1);
@@ -179,14 +179,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
 		pos++;
 	}
 
-	/* IEEE80211_RADIOTAP_DBM_ANTNOISE */
-	if (local->hw.flags & IEEE80211_HW_NOISE_DBM) {
-		*pos = status->noise;
-		rthdr->it_present |=
-			cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTNOISE);
-		pos++;
-	}
-
 	/* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
 
 	/* IEEE80211_RADIOTAP_ANTENNA */
@@ -236,6 +228,12 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
 	if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
 		present_fcs_len = FCS_LEN;
 
+	/* make sure hdr->frame_control is on the linear part */
+	if (!pskb_may_pull(origskb, 2)) {
+		dev_kfree_skb(origskb);
+		return NULL;
+	}
+
 	if (!local->monitors) {
 		if (should_drop_frame(origskb, present_fcs_len)) {
 			dev_kfree_skb(origskb);
@@ -493,7 +491,7 @@ ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
 
 	if (ieee80211_is_action(hdr->frame_control)) {
 		mgmt = (struct ieee80211_mgmt *)hdr;
-		if (mgmt->u.action.category != MESH_PLINK_CATEGORY)
+		if (mgmt->u.action.category != WLAN_CATEGORY_MESH_PLINK)
 			return RX_DROP_MONITOR;
 		return RX_CONTINUE;
 	}
@@ -723,14 +721,16 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
 
 	tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
 
-	if (sta->ampdu_mlme.tid_state_rx[tid] != HT_AGG_STATE_OPERATIONAL)
-		goto dont_reorder;
+	spin_lock(&sta->lock);
+
+	if (!sta->ampdu_mlme.tid_active_rx[tid])
+		goto dont_reorder_unlock;
 
 	tid_agg_rx = sta->ampdu_mlme.tid_rx[tid];
 
 	/* qos null data frames are excluded */
 	if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
-		goto dont_reorder;
+		goto dont_reorder_unlock;
 
 	/* new, potentially un-ordered, ampdu frame - process it */
 
@@ -742,15 +742,20 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
 	/* if this mpdu is fragmented - terminate rx aggregation session */
 	sc = le16_to_cpu(hdr->seq_ctrl);
 	if (sc & IEEE80211_SCTL_FRAG) {
-		ieee80211_sta_stop_rx_ba_session(sta->sdata, sta->sta.addr,
-			tid, 0, WLAN_REASON_QSTA_REQUIRE_SETUP);
+		spin_unlock(&sta->lock);
+		__ieee80211_stop_rx_ba_session(sta, tid, WLAN_BACK_RECIPIENT,
+					       WLAN_REASON_QSTA_REQUIRE_SETUP);
 		dev_kfree_skb(skb);
 		return;
 	}
 
-	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames))
+	if (ieee80211_sta_manage_reorder_buf(hw, tid_agg_rx, skb, frames)) {
+		spin_unlock(&sta->lock);
 		return;
+	}
 
+ dont_reorder_unlock:
+	spin_unlock(&sta->lock);
  dont_reorder:
 	__skb_queue_tail(frames, skb);
 }
@@ -897,6 +902,7 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 		rx->key = key;
 		return RX_CONTINUE;
 	} else {
+		u8 keyid;
 		/*
 		 * The device doesn't give us the IV so we won't be
 		 * able to look up the key. That's ok though, we
@@ -919,7 +925,8 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 		 * no need to call ieee80211_wep_get_keyidx,
 		 * it verifies a bunch of things we've done already
 		 */
-		keyidx = rx->skb->data[hdrlen + 3] >> 6;
+		skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
+		keyidx = keyid >> 6;
 
 		rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
 
@@ -940,6 +947,11 @@ ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
 		return RX_DROP_MONITOR;
 	}
 
+	if (skb_linearize(rx->skb))
+		return RX_DROP_UNUSABLE;
+
+	hdr = (struct ieee80211_hdr *)rx->skb->data;
+
 	/* Check for weak IVs if possible */
 	if (rx->sta && rx->key->conf.alg == ALG_WEP &&
 	    ieee80211_is_data(hdr->frame_control) &&
@@ -1078,7 +1090,6 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
 	sta->rx_fragments++;
 	sta->rx_bytes += rx->skb->len;
 	sta->last_signal = status->signal;
-	sta->last_noise = status->noise;
 
 	/*
 	 * Change STA power saving mode only at the end of a frame
@@ -1241,6 +1252,9 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
 	}
 	I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
+	if (skb_linearize(rx->skb))
+		return RX_DROP_UNUSABLE;
+
 	seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
 
 	if (frag == 0) {
@@ -1406,21 +1420,24 @@ static int
 ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
 {
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
+	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
 	__le16 fc = hdr->frame_control;
-	int res;
 
-	res = ieee80211_drop_unencrypted(rx, fc);
-	if (unlikely(res))
-		return res;
+	/*
+	 * Pass through unencrypted frames if the hardware has
+	 * decrypted them already.
+	 */
+	if (status->flag & RX_FLAG_DECRYPTED)
+		return 0;
 
 	if (rx->sta && test_sta_flags(rx->sta, WLAN_STA_MFP)) {
-		if (unlikely(ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
+		if (unlikely(!ieee80211_has_protected(fc) &&
+			     ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
 			     rx->key))
 			return -EACCES;
 		/* BIP does not use Protected field, so need to check MMIE */
 		if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
-			     ieee80211_get_mmie_keyidx(rx->skb) < 0 &&
-			     rx->key))
+			     ieee80211_get_mmie_keyidx(rx->skb) < 0))
 			return -EACCES;
 		/*
 		 * When using MFP, Action frames are not allowed prior to
@@ -1598,6 +1615,9 @@ ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
 	skb->dev = dev;
 	__skb_queue_head_init(&frame_list);
 
+	if (skb_linearize(skb))
+		return RX_DROP_UNUSABLE;
+
 	ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
 				 rx->sdata->vif.type,
 				 rx->local->hw.extra_tx_headroom);
@@ -1796,10 +1816,12 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 	if (ieee80211_is_back_req(bar->frame_control)) {
 		if (!rx->sta)
 			return RX_DROP_MONITOR;
+		spin_lock(&rx->sta->lock);
 		tid = le16_to_cpu(bar->control) >> 12;
-		if (rx->sta->ampdu_mlme.tid_state_rx[tid]
-					!= HT_AGG_STATE_OPERATIONAL)
+		if (!rx->sta->ampdu_mlme.tid_active_rx[tid]) {
+			spin_unlock(&rx->sta->lock);
 			return RX_DROP_MONITOR;
+		}
 		tid_agg_rx = rx->sta->ampdu_mlme.tid_rx[tid];
 
 		start_seq_num = le16_to_cpu(bar->start_seq_num) >> 4;
@@ -1813,6 +1835,7 @@ ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
 		ieee80211_release_reorder_frames(hw, tid_agg_rx, start_seq_num,
 						 frames);
 		kfree_skb(skb);
+		spin_unlock(&rx->sta->lock);
 		return RX_QUEUED;
 	}
 
@@ -1974,8 +1997,8 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
 			goto handled;
 		}
 		break;
-	case MESH_PLINK_CATEGORY:
-	case MESH_PATH_SEL_CATEGORY:
+	case WLAN_CATEGORY_MESH_PLINK:
+	case WLAN_CATEGORY_MESH_PATH_SEL:
 		if (ieee80211_vif_is_mesh(&sdata->vif))
 			return ieee80211_mesh_rx_mgmt(sdata, rx->skb);
 		break;
@@ -2372,29 +2395,42 @@ static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
 	struct ieee80211_local *local = hw_to_local(hw);
 	struct ieee80211_sub_if_data *sdata;
 	struct ieee80211_hdr *hdr;
+	__le16 fc;
 	struct ieee80211_rx_data rx;
 	int prepares;
 	struct ieee80211_sub_if_data *prev = NULL;
 	struct sk_buff *skb_new;
 	struct sta_info *sta, *tmp;
 	bool found_sta = false;
+	int err = 0;
 
-	hdr = (struct ieee80211_hdr *)skb->data;
+	fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
 	memset(&rx, 0, sizeof(rx));
 	rx.skb = skb;
 	rx.local = local;
 
-	if (ieee80211_is_data(hdr->frame_control) || ieee80211_is_mgmt(hdr->frame_control))
+	if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
 		local->dot11ReceivedFragmentCount++;
 
 	if (unlikely(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
 		     test_bit(SCAN_OFF_CHANNEL, &local->scanning)))
 		rx.flags |= IEEE80211_RX_IN_SCAN;
 
+	if (ieee80211_is_mgmt(fc))
+		err = skb_linearize(skb);
+	else
+		err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
+
+	if (err) {
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	hdr = (struct ieee80211_hdr *)skb->data;
 	ieee80211_parse_qos(&rx);
 	ieee80211_verify_alignment(&rx);
 
-	if (ieee80211_is_data(hdr->frame_control)) {
+	if (ieee80211_is_data(fc)) {
 		for_each_sta_info(local, hdr->addr2, sta, tmp) {
 			rx.sta = sta;
 			found_sta = true;
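
The rule the hunk above settles on: management frames are linearized in full because they are parsed field-by-field throughout mac80211, while data frames only need the 802.11 header pulled into the linear area so paged A-MSDU payloads can stay paged. Condensed into a hypothetical helper (illustrative only -- the helper is invented, the skb and ieee80211 calls are the real APIs):

	static int rx_make_readable(struct sk_buff *skb, __le16 fc)
	{
		/* management frames: flatten everything */
		if (ieee80211_is_mgmt(fc))
			return skb_linearize(skb);

		/* data frames: only the header must be linear */
		if (!pskb_may_pull(skb, ieee80211_hdrlen(fc)))
			return -ENOMEM;

		return 0;
	}
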
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 85507bd9e341..e1a3defdf581 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -14,6 +14,8 @@
 
 #include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
+#include <linux/pm_qos_params.h>
+#include <net/sch_generic.h>
 #include <linux/slab.h>
 #include <net/mac80211.h>
 
@@ -246,6 +248,8 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted)
 	struct ieee80211_local *local = hw_to_local(hw);
 	bool was_hw_scan;
 
+	trace_api_scan_completed(local, aborted);
+
 	mutex_lock(&local->scan_mtx);
 
 	/*
@@ -322,6 +326,7 @@ static int ieee80211_start_sw_scan(struct ieee80211_local *local)
 
 	ieee80211_offchannel_stop_beaconing(local);
 
+	local->leave_oper_channel_time = 0;
 	local->next_scan_state = SCAN_DECISION;
 	local->scan_channel_idx = 0;
 
@@ -426,11 +431,28 @@ static int __ieee80211_start_scan(struct ieee80211_sub_if_data *sdata,
 	return rc;
 }
 
+static unsigned long
+ieee80211_scan_get_channel_time(struct ieee80211_channel *chan)
+{
+	/*
+	 * TODO: channel switching also consumes quite some time,
+	 * add that delay as well to get a better estimation
+	 */
+	if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN)
+		return IEEE80211_PASSIVE_CHANNEL_TIME;
+	return IEEE80211_PROBE_DELAY + IEEE80211_CHANNEL_TIME;
+}
+
 static int ieee80211_scan_state_decision(struct ieee80211_local *local,
 					 unsigned long *next_delay)
 {
 	bool associated = false;
+	bool tx_empty = true;
+	bool bad_latency;
+	bool listen_int_exceeded;
+	unsigned long min_beacon_int = 0;
 	struct ieee80211_sub_if_data *sdata;
+	struct ieee80211_channel *next_chan;
 
 	/* if no more bands/channels left, complete scan and advance to the idle state */
 	if (local->scan_channel_idx >= local->scan_req->n_channels) {
@@ -438,7 +460,11 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
 		return 1;
 	}
 
-	/* check if at least one STA interface is associated */
+	/*
+	 * check if at least one STA interface is associated,
+	 * check if at least one STA interface has pending tx frames
+	 * and grab the lowest used beacon interval
+	 */
 	mutex_lock(&local->iflist_mtx);
 	list_for_each_entry(sdata, &local->interfaces, list) {
 		if (!ieee80211_sdata_running(sdata))
@@ -447,7 +473,16 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
 		if (sdata->vif.type == NL80211_IFTYPE_STATION) {
 			if (sdata->u.mgd.associated) {
 				associated = true;
-				break;
+
+				if (sdata->vif.bss_conf.beacon_int <
+				    min_beacon_int || min_beacon_int == 0)
+					min_beacon_int =
+						sdata->vif.bss_conf.beacon_int;
+
+				if (!qdisc_all_tx_empty(sdata->dev)) {
+					tx_empty = false;
+					break;
+				}
 			}
 		}
 	}
@@ -456,11 +491,34 @@ static int ieee80211_scan_state_decision(struct ieee80211_local *local,
 	if (local->scan_channel) {
 		/*
 		 * we're currently scanning a different channel, let's
-		 * switch back to the operating channel now if at least
-		 * one interface is associated. Otherwise just scan the
-		 * next channel
+		 * see if we can scan another channel without interfering
+		 * with the current traffic situation.
+		 *
+		 * Since we don't know if the AP has pending frames for us
+		 * we can only check for our tx queues and use the current
+		 * pm_qos requirements for rx. Hence, if no tx traffic occurs
+		 * at all we will scan as many channels in a row as the pm_qos
+		 * latency allows us to. Additionally we also check for the
+		 * currently negotiated listen interval to prevent losing
+		 * frames unnecessarily.
+		 *
+		 * Otherwise switch back to the operating channel.
 		 */
-		if (associated)
+		next_chan = local->scan_req->channels[local->scan_channel_idx];
+
+		bad_latency = time_after(jiffies +
+				ieee80211_scan_get_channel_time(next_chan),
+				local->leave_oper_channel_time +
+				usecs_to_jiffies(pm_qos_requirement(PM_QOS_NETWORK_LATENCY)));
+
+		listen_int_exceeded = time_after(jiffies +
+				ieee80211_scan_get_channel_time(next_chan),
+				local->leave_oper_channel_time +
+				usecs_to_jiffies(min_beacon_int * 1024) *
+				local->hw.conf.listen_interval);
+
+		if (associated && ( !tx_empty || bad_latency ||
+		    listen_int_exceeded))
 			local->next_scan_state = SCAN_ENTER_OPER_CHANNEL;
 		else
 			local->next_scan_state = SCAN_SET_CHANNEL;
@@ -492,6 +550,9 @@ static void ieee80211_scan_state_leave_oper_channel(struct ieee80211_local *loca
 	else
 		*next_delay = HZ / 10;
 
+	/* remember when we left the operating channel */
+	local->leave_oper_channel_time = jiffies;
+
 	/* advance to the next channel to be scanned */
 	local->next_scan_state = SCAN_SET_CHANNEL;
 }
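
The scan changes above budget off-channel time two ways: against the pm_qos network-latency requirement and against what the AP can be expected to buffer for the negotiated listen interval (beacon intervals are in TU, 1 TU = 1024 usec). A user-space sketch of that bookkeeping; the dwell-time constants here are examples, not the kernel's values:

	#include <stdio.h>
	#include <stdbool.h>

	#define PROBE_DELAY_US		 10000	/* delay before active probing */
	#define CHANNEL_TIME_US		 30000	/* active dwell per channel */
	#define PASSIVE_CHANNEL_TIME_US	110000	/* passive dwell per channel */

	static unsigned long channel_dwell_us(bool passive)
	{
		return passive ? PASSIVE_CHANNEL_TIME_US
			       : PROBE_DELAY_US + CHANNEL_TIME_US;
	}

	int main(void)
	{
		unsigned long away_us = 25000;		 /* already off-channel */
		unsigned long pm_qos_latency_us = 50000; /* tolerated rx latency */
		unsigned long beacon_int_tu = 100;	 /* lowest beacon interval */
		unsigned long listen_interval = 3;	 /* in beacon intervals */
		bool passive = false;

		/* would one more channel blow the pm_qos latency budget? */
		bool bad_latency = away_us + channel_dwell_us(passive) >
				   pm_qos_latency_us;

		/* ... or outlast what the AP buffers for our listen interval? */
		bool listen_int_exceeded = away_us + channel_dwell_us(passive) >
					   beacon_int_tu * 1024 * listen_interval;

		printf("bad_latency=%d listen_int_exceeded=%d\n",
		       bad_latency, listen_int_exceeded);
		return 0;
	}

If the interface is associated and either its tx queues are non-empty or one of these bounds trips, the state machine returns to the operating channel; otherwise it hops straight to the next scan channel.
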
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index fb12cec4d333..ff0eb948917b 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -250,9 +250,6 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
 	 * enable session_timer's data differentiation. refer to
 	 * sta_rx_agg_session_timer_expired for useage */
 		sta->timer_to_tid[i] = i;
-		/* rx */
-		sta->ampdu_mlme.tid_state_rx[i] = HT_AGG_STATE_IDLE;
-		sta->ampdu_mlme.tid_rx[i] = NULL;
 		/* tx */
 		sta->ampdu_mlme.tid_state_tx[i] = HT_AGG_STATE_IDLE;
 		sta->ampdu_mlme.tid_tx[i] = NULL;
@@ -619,7 +616,7 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 	struct ieee80211_sub_if_data *sdata;
 	struct sk_buff *skb;
 	unsigned long flags;
-	int ret, i;
+	int ret;
 
 	might_sleep();
 
@@ -629,6 +626,15 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 	local = sta->local;
 	sdata = sta->sdata;
 
+	/*
+	 * Before removing the station from the driver and
+	 * rate control, it might still start new aggregation
+	 * sessions -- block that to make sure the tear-down
+	 * will be sufficient.
+	 */
+	set_sta_flags(sta, WLAN_STA_BLOCK_BA);
+	ieee80211_sta_tear_down_BA_sessions(sta);
+
 	spin_lock_irqsave(&local->sta_lock, flags);
 	ret = sta_info_hash_del(local, sta);
 	/* this might still be the pending list ... which is fine */
@@ -645,9 +651,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 	 * may mean it is removed from hardware which requires that
 	 * the key->sta pointer is still valid, so flush the key todo
 	 * list here.
-	 *
-	 * ieee80211_key_todo() will synchronize_rcu() so after this
-	 * nothing can reference this sta struct any more.
 	 */
 	ieee80211_key_todo();
 
@@ -679,11 +682,17 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 		sdata = sta->sdata;
 	}
 
+	/*
+	 * At this point, after we wait for an RCU grace period,
+	 * neither mac80211 nor the driver can reference this
+	 * sta struct any more except by still existing timers
+	 * associated with this station that we clean up below.
+	 */
+	synchronize_rcu();
+
 #ifdef CONFIG_MAC80211_MESH
-	if (ieee80211_vif_is_mesh(&sdata->vif)) {
+	if (ieee80211_vif_is_mesh(&sdata->vif))
 		mesh_accept_plinks_update(sdata);
-		del_timer(&sta->plink_timer);
-	}
 #endif
 
 #ifdef CONFIG_MAC80211_VERBOSE_DEBUG
@@ -710,50 +719,6 @@ static int __must_check __sta_info_destroy(struct sta_info *sta)
 	while ((skb = skb_dequeue(&sta->tx_filtered)) != NULL)
 		dev_kfree_skb_any(skb);
 
-	for (i = 0; i < STA_TID_NUM; i++) {
-		struct tid_ampdu_rx *tid_rx;
-		struct tid_ampdu_tx *tid_tx;
-
-		spin_lock_bh(&sta->lock);
-		tid_rx = sta->ampdu_mlme.tid_rx[i];
-		/* Make sure timer won't free the tid_rx struct, see below */
-		if (tid_rx)
-			tid_rx->shutdown = true;
-
-		spin_unlock_bh(&sta->lock);
-
-		/*
-		 * Outside spinlock - shutdown is true now so that the timer
-		 * won't free tid_rx, we have to do that now. Can't let the
-		 * timer do it because we have to sync the timer outside the
-		 * lock that it takes itself.
-		 */
-		if (tid_rx) {
-			del_timer_sync(&tid_rx->session_timer);
-			kfree(tid_rx);
-		}
-
-		/*
-		 * No need to do such complications for TX agg sessions, the
-		 * path leading to freeing the tid_tx struct goes via a call
-		 * from the driver, and thus needs to look up the sta struct
-		 * again, which cannot be found when we get here. Hence, we
-		 * just need to delete the timer and free the aggregation
-		 * info; we won't be telling the peer about it then but that
-		 * doesn't matter if we're not talking to it again anyway.
-		 */
-		tid_tx = sta->ampdu_mlme.tid_tx[i];
-		if (tid_tx) {
-			del_timer_sync(&tid_tx->addba_resp_timer);
-			/*
-			 * STA removed while aggregation session being
-			 * started? Bit odd, but purge frames anyway.
-			 */
-			skb_queue_purge(&tid_tx->pending);
-			kfree(tid_tx);
-		}
-	}
-
 	__sta_info_free(local, sta);
 
 	return 0;
@@ -992,6 +957,8 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw,
 {
 	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
 
+	trace_api_sta_block_awake(sta->local, pubsta, block);
+
 	if (block)
 		set_sta_flags(sta, WLAN_STA_PS_DRIVER);
 	else
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 822d84522937..48a5e80957f0 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -35,8 +35,8 @@
  *	IEEE80211_TX_CTL_CLEAR_PS_FILT control flag) when the next
  *	frame to this station is transmitted.
  * @WLAN_STA_MFP: Management frame protection is used with this STA.
- * @WLAN_STA_SUSPEND: Set/cleared during a suspend/resume cycle.
- *	Used to deny ADDBA requests (both TX and RX).
+ * @WLAN_STA_BLOCK_BA: Used to deny ADDBA requests (both TX and RX)
+ *	during suspend/resume and station removal.
  * @WLAN_STA_PS_DRIVER: driver requires keeping this station in
  *	power-save mode logically to flush frames that might still
  *	be in the queues
@@ -57,7 +57,7 @@ enum ieee80211_sta_info_flags {
 	WLAN_STA_WDS		= 1<<7,
 	WLAN_STA_CLEAR_PS_FILT	= 1<<9,
 	WLAN_STA_MFP		= 1<<10,
-	WLAN_STA_SUSPEND	= 1<<11,
+	WLAN_STA_BLOCK_BA	= 1<<11,
 	WLAN_STA_PS_DRIVER	= 1<<12,
 	WLAN_STA_PSPOLL		= 1<<13,
 	WLAN_STA_DISASSOC	= 1<<14,
@@ -106,7 +106,6 @@ struct tid_ampdu_tx {
  * @buf_size: buffer size for incoming A-MPDUs
  * @timeout: reset timer value (in TUs).
  * @dialog_token: dialog token for aggregation session
- * @shutdown: this session is being shut down due to STA removal
  */
 struct tid_ampdu_rx {
 	struct sk_buff **reorder_buf;
@@ -118,7 +117,6 @@ struct tid_ampdu_rx {
 	u16 buf_size;
 	u16 timeout;
 	u8 dialog_token;
-	bool shutdown;
 };
 
 /**
@@ -156,7 +154,7 @@ enum plink_state {
  */
 struct sta_ampdu_mlme {
 	/* rx */
-	u8 tid_state_rx[STA_TID_NUM];
+	bool tid_active_rx[STA_TID_NUM];
 	struct tid_ampdu_rx *tid_rx[STA_TID_NUM];
 	/* tx */
 	u8 tid_state_tx[STA_TID_NUM];
@@ -200,7 +198,6 @@ struct sta_ampdu_mlme {
200 * @rx_fragments: number of received MPDUs 198 * @rx_fragments: number of received MPDUs
201 * @rx_dropped: number of dropped MPDUs from this STA 199 * @rx_dropped: number of dropped MPDUs from this STA
202 * @last_signal: signal of last received frame from this STA 200 * @last_signal: signal of last received frame from this STA
203 * @last_noise: noise of last received frame from this STA
204 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue) 201 * @last_seq_ctrl: last received seq/frag number from this STA (per RX queue)
205 * @tx_filtered_count: number of frames the hardware filtered for this STA 202 * @tx_filtered_count: number of frames the hardware filtered for this STA
206 * @tx_retry_failed: number of frames that failed retry 203 * @tx_retry_failed: number of frames that failed retry
@@ -267,7 +264,6 @@ struct sta_info {
267 unsigned long rx_fragments; 264 unsigned long rx_fragments;
268 unsigned long rx_dropped; 265 unsigned long rx_dropped;
269 int last_signal; 266 int last_signal;
270 int last_noise;
271 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES]; 267 __le16 last_seq_ctrl[NUM_RX_DATA_QUEUES];
272 268
273 /* Updated from TX status path only, no locking requirements */ 269 /* Updated from TX status path only, no locking requirements */
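The sta_info.h hunks above generalize what was a suspend-only flag: any path that must refuse new block-ack sessions (suspend, resume, or station teardown) can now key off the single WLAN_STA_BLOCK_BA bit. A minimal sketch of a check site, assuming mac80211's test_sta_flags() helper; the function below is illustrative, not the tree's actual ADDBA path:

    static int try_start_tx_agg(struct sta_info *sta, u16 tid)
    {
        /* refuse ADDBA while suspend/resume or removal is in flight */
        if (test_sta_flags(sta, WLAN_STA_BLOCK_BA))
            return -EINVAL;
        /* ...normal aggregation setup would continue here... */
        return 0;
    }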
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 56d5b9a6ec5b..11805a3a626f 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -171,7 +171,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
     struct net_device *prev_dev = NULL;
     struct sta_info *sta, *tmp;
     int retry_count = -1, i;
-    bool injected;
+    bool send_to_cooked;
 
     for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
         /* the HW cannot have attempted that rate */
@@ -296,11 +296,15 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
     /* this was a transmitted frame, but now we want to reuse it */
     skb_orphan(skb);
 
+    /* Need to make a copy before skb->cb gets cleared */
+    send_to_cooked = !!(info->flags & IEEE80211_TX_CTL_INJECTED) ||
+             (type != IEEE80211_FTYPE_DATA);
+
     /*
      * This is a bit racy but we can avoid a lot of work
      * with this test...
      */
-    if (!local->monitors && !local->cooked_mntrs) {
+    if (!local->monitors && (!send_to_cooked || !local->cooked_mntrs)) {
         dev_kfree_skb(skb);
         return;
     }
@@ -345,9 +349,6 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
     /* for now report the total retry_count */
     rthdr->data_retries = retry_count;
 
-    /* Need to make a copy before skb->cb gets cleared */
-    injected = !!(info->flags & IEEE80211_TX_CTL_INJECTED);
-
     /* XXX: is this sufficient for BPF? */
     skb_set_mac_header(skb, 0);
     skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -362,8 +363,7 @@ void ieee80211_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb)
             continue;
 
         if ((sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) &&
-            !injected &&
-            (type == IEEE80211_FTYPE_DATA))
+            !send_to_cooked)
             continue;
 
         if (prev_dev) {
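The status.c change fixes an ordering problem: `injected` used to be derived after the radiotap header was built, by which point skb->cb (and IEEE80211_TX_CTL_INJECTED with it) may already have been overwritten. Computing `send_to_cooked` right after skb_orphan() also lets the early-free test avoid queueing plain data frames to cooked monitors at all. The pattern in isolation, as a compilable user-space sketch (names here are illustrative):

    #include <string.h>

    struct cb { unsigned int flags; };    /* stand-in for skb->cb */
    #define CTL_INJECTED 0x1

    static int classify_before_reuse(struct cb *cb, int is_data)
    {
        /* copy what we need first: after the memset the flag is gone */
        int send_to_cooked = (cb->flags & CTL_INJECTED) || !is_data;

        memset(cb, 0, sizeof(*cb));    /* buffer reused for another path */
        return send_to_cooked;
    }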
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index cfc473e1b050..2cb77267f733 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -513,6 +513,8 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
     else if (tx->sta && (key = rcu_dereference(tx->sta->key)))
         tx->key = key;
     else if (ieee80211_is_mgmt(hdr->frame_control) &&
+         is_multicast_ether_addr(hdr->addr1) &&
+         ieee80211_is_robust_mgmt_frame(hdr) &&
          (key = rcu_dereference(tx->sdata->default_mgmt_key)))
         tx->key = key;
     else if ((key = rcu_dereference(tx->sdata->default_key)))
@@ -1142,13 +1144,12 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
 
     if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) &&
         (local->hw.flags & IEEE80211_HW_AMPDU_AGGREGATION)) {
-        unsigned long flags;
         struct tid_ampdu_tx *tid_tx;
 
         qc = ieee80211_get_qos_ctl(hdr);
         tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
 
-        spin_lock_irqsave(&tx->sta->lock, flags);
+        spin_lock(&tx->sta->lock);
         /*
          * XXX: This spinlock could be fairly expensive, but see the
          * comment in agg-tx.c:ieee80211_agg_tx_operational().
@@ -1173,7 +1174,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
             info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
             __skb_queue_tail(&tid_tx->pending, skb);
         }
-        spin_unlock_irqrestore(&tx->sta->lock, flags);
+        spin_unlock(&tx->sta->lock);
 
         if (unlikely(queued))
             return TX_QUEUED;
@@ -2011,14 +2012,12 @@ void ieee80211_tx_pending(unsigned long data)
         while (!skb_queue_empty(&local->pending[i])) {
             struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
             struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
-            struct ieee80211_sub_if_data *sdata;
 
             if (WARN_ON(!info->control.vif)) {
                 kfree_skb(skb);
                 continue;
             }
 
-            sdata = vif_to_sdata(info->control.vif);
             spin_unlock_irqrestore(&local->queue_stop_reason_lock,
                            flags);
 
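Dropping the _irqsave variant in ieee80211_tx_prepare() is only correct if sta->lock is never taken from hard-IRQ context on this path; that appears to be the assumption the hunk encodes (the retained comment points at agg-tx.c for the locking rationale). For reference, the two idioms being swapped:

    spin_lock_irqsave(&sta->lock, flags);    /* needed if an IRQ handler
                                              * can take the same lock */
    /* ... */
    spin_unlock_irqrestore(&sta->lock, flags);

    spin_lock(&sta->lock);                   /* cheaper; correct only when
                                              * no IRQ context contends */
    /* ... */
    spin_unlock(&sta->lock);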
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 53af57047435..2b75b4fb68f4 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -270,6 +270,8 @@ static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,
     struct ieee80211_local *local = hw_to_local(hw);
     struct ieee80211_sub_if_data *sdata;
 
+    trace_wake_queue(local, queue, reason);
+
     if (WARN_ON(queue >= hw->queues))
         return;
 
@@ -312,6 +314,8 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
     struct ieee80211_local *local = hw_to_local(hw);
     struct ieee80211_sub_if_data *sdata;
 
+    trace_stop_queue(local, queue, reason);
+
     if (WARN_ON(queue >= hw->queues))
         return;
 
@@ -796,6 +800,11 @@ void ieee80211_set_wmm_default(struct ieee80211_sub_if_data *sdata)
 
         drv_conf_tx(local, queue, &qparam);
     }
+
+    /* after reinitialize QoS TX queues setting to default,
+     * disable QoS at all */
+    local->hw.conf.flags &= ~IEEE80211_CONF_QOS;
+    drv_config(local, IEEE80211_CONF_CHANGE_QOS);
 }
 
 void ieee80211_sta_def_wmm_params(struct ieee80211_sub_if_data *sdata,
@@ -1135,7 +1144,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
 
     if (hw->flags & IEEE80211_HW_AMPDU_AGGREGATION) {
         list_for_each_entry_rcu(sta, &local->sta_list, list) {
-            clear_sta_flags(sta, WLAN_STA_SUSPEND);
+            clear_sta_flags(sta, WLAN_STA_BLOCK_BA);
         }
     }
 
diff --git a/net/mac80211/work.c b/net/mac80211/work.c
index 15e1ba931b87..bdb1d05b16fc 100644
--- a/net/mac80211/work.c
+++ b/net/mac80211/work.c
@@ -920,11 +920,16 @@ static void ieee80211_work_work(struct work_struct *work)
         run_again(local, jiffies + HZ/2);
     }
 
-    if (list_empty(&local->work_list) && local->scan_req)
+    mutex_lock(&local->scan_mtx);
+
+    if (list_empty(&local->work_list) && local->scan_req &&
+        !local->scanning)
         ieee80211_queue_delayed_work(&local->hw,
                          &local->scan_work,
                          round_jiffies_relative(0));
 
+    mutex_unlock(&local->scan_mtx);
+
     mutex_unlock(&local->work_mtx);
 
     ieee80211_recalc_idle(local);
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 8fb0ae616761..7ba06939829f 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -802,7 +802,7 @@ static int sync_thread_backup(void *data)
         ip_vs_backup_mcast_ifn, ip_vs_backup_syncid);
 
     while (!kthread_should_stop()) {
-        wait_event_interruptible(*tinfo->sock->sk->sk_sleep,
+        wait_event_interruptible(*sk_sleep(tinfo->sock->sk),
              !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue)
              || kthread_should_stop());
 
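This hunk, and the netrom, rose, rxrpc, and rds conversions below, are one mechanical change: direct `sk->sk_sleep` dereferences become the `sk_sleep()` accessor, so the wait-queue storage can later be moved without touching every caller again. At the point of this series the helper is, to the best of my reading, just a thin wrapper in include/net/sock.h (treat the exact body as an approximation):

    static inline wait_queue_head_t *sk_sleep(struct sock *sk)
    {
        return sk->sk_sleep;
    }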
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 795424396aff..6464a1972a69 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -545,7 +545,7 @@ static int netlink_autobind(struct socket *sock)
     struct hlist_head *head;
     struct sock *osk;
     struct hlist_node *node;
-    s32 pid = current->tgid;
+    s32 pid = task_tgid_vnr(current);
     int err;
     static s32 rover = -4097;
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 06438fa2b1e5..aa4308afcc7f 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -21,15 +21,17 @@
 
 static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
 
-static inline void genl_lock(void)
+void genl_lock(void)
 {
     mutex_lock(&genl_mutex);
 }
+EXPORT_SYMBOL(genl_lock);
 
-static inline void genl_unlock(void)
+void genl_unlock(void)
 {
     mutex_unlock(&genl_mutex);
 }
+EXPORT_SYMBOL(genl_unlock);
 
 #define GENL_FAM_TAB_SIZE    16
 #define GENL_FAM_TAB_MASK    (GENL_FAM_TAB_SIZE - 1)
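Un-inlining and exporting the lock pair lets modules serialize against generic netlink message processing. A hypothetical module-side use (the function name is made up for illustration):

    #include <net/genetlink.h>

    static void my_inspect_families(void)
    {
        genl_lock();
        /* genl family and ops tables are stable while the mutex is held */
        genl_unlock();
    }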
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index fa07f044b599..06cb02796a0e 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -739,7 +739,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
         DEFINE_WAIT(wait);
 
         for (;;) {
-            prepare_to_wait(sk->sk_sleep, &wait,
+            prepare_to_wait(sk_sleep(sk), &wait,
                     TASK_INTERRUPTIBLE);
             if (sk->sk_state != TCP_SYN_SENT)
                 break;
@@ -752,7 +752,7 @@ static int nr_connect(struct socket *sock, struct sockaddr *uaddr,
             err = -ERESTARTSYS;
             break;
         }
-        finish_wait(sk->sk_sleep, &wait);
+        finish_wait(sk_sleep(sk), &wait);
         if (err)
             goto out_release;
     }
@@ -798,7 +798,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
      * hooked into the SABM we saved
      */
     for (;;) {
-        prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
         skb = skb_dequeue(&sk->sk_receive_queue);
         if (skb)
             break;
@@ -816,7 +816,7 @@ static int nr_accept(struct socket *sock, struct socket *newsock, int flags)
             err = -ERESTARTSYS;
             break;
         }
-        finish_wait(sk->sk_sleep, &wait);
+        finish_wait(sk_sleep(sk), &wait);
         if (err)
             goto out_release;
 
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 243946d4809d..2078a277e06b 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -82,6 +82,7 @@
 #include <linux/mutex.h>
 #include <linux/if_vlan.h>
 #include <linux/virtio_net.h>
+#include <linux/errqueue.h>
 
 #ifdef CONFIG_INET
 #include <net/inet_common.h>
@@ -315,6 +316,8 @@ static inline struct packet_sock *pkt_sk(struct sock *sk)
 
 static void packet_sock_destruct(struct sock *sk)
 {
+    skb_queue_purge(&sk->sk_error_queue);
+
     WARN_ON(atomic_read(&sk->sk_rmem_alloc));
     WARN_ON(atomic_read(&sk->sk_wmem_alloc));
 
@@ -483,6 +486,9 @@ retry:
     skb->dev = dev;
     skb->priority = sk->sk_priority;
     skb->mark = sk->sk_mark;
+    err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+    if (err < 0)
+        goto out_unlock;
 
     dev_queue_xmit(skb);
     rcu_read_unlock();
@@ -1188,6 +1194,9 @@ static int packet_snd(struct socket *sock,
     err = skb_copy_datagram_from_iovec(skb, offset, msg->msg_iov, 0, len);
     if (err)
         goto out_free;
+    err = sock_tx_timestamp(msg, sk, skb_tx(skb));
+    if (err < 0)
+        goto out_free;
 
     skb->protocol = proto;
     skb->dev = dev;
@@ -1487,6 +1496,51 @@ out:
     return err;
 }
 
+static int packet_recv_error(struct sock *sk, struct msghdr *msg, int len)
+{
+    struct sock_exterr_skb *serr;
+    struct sk_buff *skb, *skb2;
+    int copied, err;
+
+    err = -EAGAIN;
+    skb = skb_dequeue(&sk->sk_error_queue);
+    if (skb == NULL)
+        goto out;
+
+    copied = skb->len;
+    if (copied > len) {
+        msg->msg_flags |= MSG_TRUNC;
+        copied = len;
+    }
+    err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+    if (err)
+        goto out_free_skb;
+
+    sock_recv_timestamp(msg, sk, skb);
+
+    serr = SKB_EXT_ERR(skb);
+    put_cmsg(msg, SOL_PACKET, PACKET_TX_TIMESTAMP,
+         sizeof(serr->ee), &serr->ee);
+
+    msg->msg_flags |= MSG_ERRQUEUE;
+    err = copied;
+
+    /* Reset and regenerate socket error */
+    spin_lock_bh(&sk->sk_error_queue.lock);
+    sk->sk_err = 0;
+    if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
+        sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
+        spin_unlock_bh(&sk->sk_error_queue.lock);
+        sk->sk_error_report(sk);
+    } else
+        spin_unlock_bh(&sk->sk_error_queue.lock);
+
+out_free_skb:
+    kfree_skb(skb);
+out:
+    return err;
+}
+
 /*
  *    Pull a packet from our receive queue and hand it to the user.
  *    If necessary we block.
@@ -1502,7 +1556,7 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
     int vnet_hdr_len = 0;
 
     err = -EINVAL;
-    if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT))
+    if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
         goto out;
 
 #if 0
@@ -1511,6 +1565,11 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
         return -ENODEV;
 #endif
 
+    if (flags & MSG_ERRQUEUE) {
+        err = packet_recv_error(sk, msg, len);
+        goto out;
+    }
+
     /*
      *    Call the generic datagram receiver. This handles all sorts
      *    of horrible races and re-entrancy so we can forget about it
@@ -1692,9 +1751,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
         if (i->alen != dev->addr_len)
             return -EINVAL;
         if (what > 0)
-            return dev_mc_add(dev, i->addr, i->alen, 0);
+            return dev_mc_add(dev, i->addr);
         else
-            return dev_mc_delete(dev, i->addr, i->alen, 0);
+            return dev_mc_del(dev, i->addr);
         break;
     case PACKET_MR_PROMISC:
         return dev_set_promiscuity(dev, what);
@@ -1706,9 +1765,9 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
         if (i->alen != dev->addr_len)
             return -EINVAL;
         if (what > 0)
-            return dev_unicast_add(dev, i->addr);
+            return dev_uc_add(dev, i->addr);
         else
-            return dev_unicast_delete(dev, i->addr);
+            return dev_uc_del(dev, i->addr);
         break;
     default:
         break;
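With packet_recv_error() wired into recvmsg(), a user of AF_PACKET TX timestamping reads completions off the socket's error queue. A user-space sketch, assuming a PF_PACKET socket `fd` with timestamping already enabled; error handling is trimmed for brevity:

    #include <sys/socket.h>
    #include <linux/errqueue.h>

    static void drain_tx_timestamps(int fd)
    {
        char data[256], ctrl[512];
        struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
        struct msghdr msg = {
            .msg_iov = &iov, .msg_iovlen = 1,
            .msg_control = ctrl, .msg_controllen = sizeof(ctrl),
        };
        struct cmsghdr *c;

        if (recvmsg(fd, &msg, MSG_ERRQUEUE | MSG_DONTWAIT) < 0)
            return;
        for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
            if (c->cmsg_level == SOL_PACKET)
                /* CMSG_DATA(c) holds the struct sock_extended_err
                 * carried by PACKET_TX_TIMESTAMP above */;
    }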
diff --git a/net/rds/af_rds.c b/net/rds/af_rds.c
index f81862baf4d0..aebfecbdb841 100644
--- a/net/rds/af_rds.c
+++ b/net/rds/af_rds.c
@@ -158,9 +158,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
     unsigned int mask = 0;
     unsigned long flags;
 
-    poll_wait(file, sk->sk_sleep, wait);
+    poll_wait(file, sk_sleep(sk), wait);
 
-    poll_wait(file, &rds_poll_waitq, wait);
+    if (rs->rs_seen_congestion)
+        poll_wait(file, &rds_poll_waitq, wait);
 
     read_lock_irqsave(&rs->rs_recv_lock, flags);
     if (!rs->rs_cong_monitor) {
@@ -182,6 +183,10 @@ static unsigned int rds_poll(struct file *file, struct socket *sock,
         mask |= (POLLOUT | POLLWRNORM);
     read_unlock_irqrestore(&rs->rs_recv_lock, flags);
 
+    /* clear state any time we wake a seen-congested socket */
+    if (mask)
+        rs->rs_seen_congestion = 0;
+
     return mask;
 }
 
@@ -447,7 +452,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
                   struct rds_info_lengths *lens)
 {
     struct rds_sock *rs;
-    struct sock *sk;
     struct rds_incoming *inc;
     unsigned long flags;
     unsigned int total = 0;
@@ -457,7 +461,6 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
     spin_lock_irqsave(&rds_sock_lock, flags);
 
     list_for_each_entry(rs, &rds_sock_list, rs_item) {
-        sk = rds_rs_to_sk(rs);
         read_lock(&rs->rs_recv_lock);
 
         /* XXX too lazy to maintain counts.. */
diff --git a/net/rds/cong.c b/net/rds/cong.c
index f1da27ceb064..0871a29f0780 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -219,8 +219,6 @@ void rds_cong_queue_updates(struct rds_cong_map *map)
     spin_lock_irqsave(&rds_cong_lock, flags);
 
     list_for_each_entry(conn, &map->m_conn_list, c_map_item) {
-        if (conn->c_loopback)
-            continue;
         if (!test_and_set_bit(0, &conn->c_map_queued)) {
             rds_stats_inc(s_cong_update_queued);
             queue_delayed_work(rds_wq, &conn->c_send_w, 0);
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
index 88d0856cb797..10ed0d55f759 100644
--- a/net/rds/ib_cm.c
+++ b/net/rds/ib_cm.c
@@ -204,9 +204,10 @@ static void rds_ib_qp_event_handler(struct ib_event *event, void *data)
         rdma_notify(ic->i_cm_id, IB_EVENT_COMM_EST);
         break;
     default:
-        rds_ib_conn_error(conn, "RDS/IB: Fatal QP Event %u "
+        rdsdebug("Fatal QP Event %u "
             "- connection %pI4->%pI4, reconnecting\n",
             event->event, &conn->c_laddr, &conn->c_faddr);
+        rds_conn_drop(conn);
         break;
     }
 }
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 059989fdb7d7..a54cd63f9e35 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -235,8 +235,8 @@ void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
 {
     flush_workqueue(rds_wq);
     rds_ib_flush_mr_pool(pool, 1);
-    BUG_ON(atomic_read(&pool->item_count));
-    BUG_ON(atomic_read(&pool->free_pinned));
+    WARN_ON(atomic_read(&pool->item_count));
+    WARN_ON(atomic_read(&pool->free_pinned));
     kfree(pool);
 }
 
@@ -441,6 +441,7 @@ static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
 
             /* FIXME we need a way to tell a r/w MR
              * from a r/o MR */
+            BUG_ON(in_interrupt());
             set_page_dirty(page);
             put_page(page);
         }
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index c7dd11b835f0..c74e9904a6b2 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -469,8 +469,8 @@ static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credi
         set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 
         rds_ib_stats_inc(s_ib_ack_send_failure);
-        /* Need to finesse this later. */
-        BUG();
+
+        rds_ib_conn_error(ic->conn, "sending ack failed\n");
     } else
         rds_ib_stats_inc(s_ib_ack_sent);
 }
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index a10fab6886d1..17fa80803ab0 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -243,8 +243,12 @@ void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
             struct rds_message *rm;
 
             rm = rds_send_get_message(conn, send->s_op);
-            if (rm)
+            if (rm) {
+                if (rm->m_rdma_op)
+                    rds_ib_send_unmap_rdma(ic, rm->m_rdma_op);
                 rds_ib_send_rdma_complete(rm, wc.status);
+                rds_message_put(rm);
+            }
         }
 
         oldest = (oldest + 1) % ic->i_send_ring.w_nr;
@@ -482,6 +486,13 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
     BUG_ON(off % RDS_FRAG_SIZE);
     BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));
 
+    /* Do not send cong updates to IB loopback */
+    if (conn->c_loopback
+        && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
+        rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
+        return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
+    }
+
     /* FIXME we may overallocate here */
     if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
         i = 1;
@@ -574,8 +585,7 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
         rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
         adv_credits += posted;
         BUG_ON(adv_credits > 255);
-    } else if (ic->i_rm != rm)
-        BUG();
+    }
 
     send = &ic->i_sends[pos];
     first = send;
@@ -714,8 +724,8 @@ add_header:
             ic->i_rm = prev->s_rm;
             prev->s_rm = NULL;
         }
-        /* Finesse this later */
-        BUG();
+
+        rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
         goto out;
     }
 
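The new early return in rds_ib_xmit() pairs with the rds/cong.c and rds/loop.c deletions: on a loopback connection the local and foreign congestion maps share the same pages, so copying is pointless and marking the map updated suffices. The completion-handler change also plugs a reference leak; annotated, the restored discipline is:

    rm = rds_send_get_message(conn, send->s_op);    /* takes a reference */
    if (rm) {
        if (rm->m_rdma_op)
            rds_ib_send_unmap_rdma(ic, rm->m_rdma_op); /* unmap before completing */
        rds_ib_send_rdma_complete(rm, wc.status);
        rds_message_put(rm);                        /* drop the get_message ref */
    }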
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
index 3e9460f935d8..a9d951b4fbae 100644
--- a/net/rds/iw_cm.c
+++ b/net/rds/iw_cm.c
@@ -157,9 +157,11 @@ static void rds_iw_qp_event_handler(struct ib_event *event, void *data)
     case IB_EVENT_QP_REQ_ERR:
     case IB_EVENT_QP_FATAL:
     default:
-        rds_iw_conn_error(conn, "RDS/IW: Fatal QP Event %u - connection %pI4->%pI4...reconnecting\n",
+        rdsdebug("Fatal QP Event %u "
+            "- connection %pI4->%pI4, reconnecting\n",
             event->event, &conn->c_laddr,
             &conn->c_faddr);
+        rds_conn_drop(conn);
         break;
     }
 }
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index da43ee840ca3..3d479067d54d 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -469,8 +469,8 @@ static void rds_iw_send_ack(struct rds_iw_connection *ic, unsigned int adv_credi
         set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
 
         rds_iw_stats_inc(s_iw_ack_send_failure);
-        /* Need to finesse this later. */
-        BUG();
+
+        rds_iw_conn_error(ic->conn, "sending ack failed\n");
     } else
         rds_iw_stats_inc(s_iw_ack_sent);
 }
diff --git a/net/rds/iw_send.c b/net/rds/iw_send.c
index 1379e9d66a78..52182ff7519e 100644
--- a/net/rds/iw_send.c
+++ b/net/rds/iw_send.c
@@ -616,8 +616,7 @@ int rds_iw_xmit(struct rds_connection *conn, struct rds_message *rm,
         rds_iw_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
         adv_credits += posted;
         BUG_ON(adv_credits > 255);
-    } else if (ic->i_rm != rm)
-        BUG();
+    }
 
     send = &ic->i_sends[pos];
     first = send;
diff --git a/net/rds/loop.c b/net/rds/loop.c
index 0d7a159158b8..dd9879379457 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -81,16 +81,9 @@ static int rds_loop_xmit_cong_map(struct rds_connection *conn,
                   struct rds_cong_map *map,
                   unsigned long offset)
 {
-    unsigned long i;
-
     BUG_ON(offset);
     BUG_ON(map != conn->c_lcong);
 
-    for (i = 0; i < RDS_CONG_MAP_PAGES; i++) {
-        memcpy((void *)conn->c_fcong->m_page_addrs[i],
-               (void *)map->m_page_addrs[i], PAGE_SIZE);
-    }
-
     rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
 
     return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 5ce9437cad67..75fd13bb631b 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -439,8 +439,10 @@ void rds_rdma_free_op(struct rds_rdma_op *ro)
         /* Mark page dirty if it was possibly modified, which
          * is the case for a RDMA_READ which copies from remote
          * to local memory */
-        if (!ro->r_write)
+        if (!ro->r_write) {
+            BUG_ON(in_interrupt());
             set_page_dirty(page);
+        }
         put_page(page);
     }
 
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c
index 9ece910ea394..5ea82fc47c3e 100644
--- a/net/rds/rdma_transport.c
+++ b/net/rds/rdma_transport.c
@@ -101,7 +101,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
         break;
 
     case RDMA_CM_EVENT_DISCONNECTED:
-        printk(KERN_WARNING "RDS/RDMA: DISCONNECT event - dropping connection "
+        rdsdebug("DISCONNECT event - dropping connection "
             "%pI4->%pI4\n", &conn->c_laddr,
             &conn->c_faddr);
         rds_conn_drop(conn);
@@ -109,8 +109,7 @@ int rds_rdma_cm_event_handler(struct rdma_cm_id *cm_id,
 
     default:
         /* things like device disconnect? */
-        printk(KERN_ERR "unknown event %u\n", event->event);
-        BUG();
+        printk(KERN_ERR "RDS: unknown event %u!\n", event->event);
         break;
     }
 
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 85d6f897ecc7..c224b5bb3ba9 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -388,6 +388,8 @@ struct rds_sock {
 
     /* flag indicating we were congested or not */
     int            rs_congested;
+    /* seen congestion (ENOBUFS) when sending? */
+    int            rs_seen_congestion;
 
     /* rs_lock protects all these adjacent members before the newline */
     spinlock_t        rs_lock;
@@ -490,7 +492,7 @@ void rds_sock_put(struct rds_sock *rs);
 void rds_wake_sk_sleep(struct rds_sock *rs);
 static inline void __rds_wake_sk_sleep(struct sock *sk)
 {
-    wait_queue_head_t *waitq = sk->sk_sleep;
+    wait_queue_head_t *waitq = sk_sleep(sk);
 
     if (!sock_flag(sk, SOCK_DEAD) && waitq)
         wake_up(waitq);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index e2a2b9344f7b..795a00b7f2cb 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -432,7 +432,7 @@ int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
             break;
         }
 
-        timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
+        timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                     (!list_empty(&rs->rs_notify_queue) ||
                      rs->rs_cong_notify ||
                      rds_next_incoming(rs, &inc)), timeo);
diff --git a/net/rds/send.c b/net/rds/send.c
index f04b929ded92..9c1c6bcaa6c9 100644
--- a/net/rds/send.c
+++ b/net/rds/send.c
@@ -508,12 +508,13 @@ EXPORT_SYMBOL_GPL(rds_send_get_message);
  */
 void rds_send_remove_from_sock(struct list_head *messages, int status)
 {
-    unsigned long flags = 0; /* silence gcc :P */
+    unsigned long flags;
     struct rds_sock *rs = NULL;
     struct rds_message *rm;
 
-    local_irq_save(flags);
     while (!list_empty(messages)) {
+        int was_on_sock = 0;
+
         rm = list_entry(messages->next, struct rds_message,
                 m_conn_item);
         list_del_init(&rm->m_conn_item);
@@ -528,20 +529,19 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
          * while we're messing with it. It does not prevent the
          * message from being removed from the socket, though.
          */
-        spin_lock(&rm->m_rs_lock);
+        spin_lock_irqsave(&rm->m_rs_lock, flags);
         if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
             goto unlock_and_drop;
 
         if (rs != rm->m_rs) {
             if (rs) {
-                spin_unlock(&rs->rs_lock);
                 rds_wake_sk_sleep(rs);
                 sock_put(rds_rs_to_sk(rs));
             }
             rs = rm->m_rs;
-            spin_lock(&rs->rs_lock);
             sock_hold(rds_rs_to_sk(rs));
         }
+        spin_lock(&rs->rs_lock);
 
         if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
             struct rds_rdma_op *ro = rm->m_rdma_op;
@@ -558,21 +558,22 @@ void rds_send_remove_from_sock(struct list_head *messages, int status)
                 notifier->n_status = status;
                 rm->m_rdma_op->r_notifier = NULL;
             }
-            rds_message_put(rm);
+            was_on_sock = 1;
             rm->m_rs = NULL;
         }
+        spin_unlock(&rs->rs_lock);
 
 unlock_and_drop:
-        spin_unlock(&rm->m_rs_lock);
+        spin_unlock_irqrestore(&rm->m_rs_lock, flags);
         rds_message_put(rm);
+        if (was_on_sock)
+            rds_message_put(rm);
     }
 
     if (rs) {
-        spin_unlock(&rs->rs_lock);
         rds_wake_sk_sleep(rs);
         sock_put(rds_rs_to_sk(rs));
     }
-    local_irq_restore(flags);
 }
 
 /*
@@ -634,9 +635,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
         list_move(&rm->m_sock_item, &list);
         rds_send_sndbuf_remove(rs, rm);
         clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
-
-        /* If this is a RDMA operation, notify the app. */
-        __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
     }
 
     /* order flag updates with the rs lock */
@@ -645,9 +643,6 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
 
     spin_unlock_irqrestore(&rs->rs_lock, flags);
 
-    if (wake)
-        rds_wake_sk_sleep(rs);
-
     conn = NULL;
 
     /* now remove the messages from the conn list as needed */
@@ -655,6 +650,10 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
         /* We do this here rather than in the loop above, so that
          * we don't have to nest m_rs_lock under rs->rs_lock */
         spin_lock_irqsave(&rm->m_rs_lock, flags2);
+        /* If this is a RDMA operation, notify the app. */
+        spin_lock(&rs->rs_lock);
+        __rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
+        spin_unlock(&rs->rs_lock);
         rm->m_rs = NULL;
         spin_unlock_irqrestore(&rm->m_rs_lock, flags2);
 
@@ -683,6 +682,9 @@ void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
     if (conn)
         spin_unlock_irqrestore(&conn->c_lock, flags);
 
+    if (wake)
+        rds_wake_sk_sleep(rs);
+
     while (!list_empty(&list)) {
         rm = list_entry(list.next, struct rds_message, m_sock_item);
         list_del_init(&rm->m_sock_item);
@@ -816,7 +818,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
     int ret = 0;
     int queued = 0, allocated_mr = 0;
     int nonblock = msg->msg_flags & MSG_DONTWAIT;
-    long timeo = sock_rcvtimeo(sk, nonblock);
+    long timeo = sock_sndtimeo(sk, nonblock);
 
     /* Mirror Linux UDP mirror of BSD error message compatibility */
     /* XXX: Perhaps MSG_MORE someday */
@@ -895,8 +897,10 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
         queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
 
     ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
-    if (ret)
+    if (ret) {
+        rs->rs_seen_congestion = 1;
         goto out;
+    }
 
     while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
                   dport, &queued)) {
@@ -911,7 +915,7 @@ int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
             goto out;
         }
 
-        timeo = wait_event_interruptible_timeout(*sk->sk_sleep,
+        timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
                     rds_send_queue_rm(rs, conn, rm,
                               rs->rs_bound_port,
                               dport,
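rds_send_remove_from_sock() previously disabled interrupts across the whole loop and carried rs_lock between iterations, which both over-serialized the path and made the unlock pairing fragile. The rewrite takes m_rs_lock with irqsave per message, holds rs_lock only around the flag manipulation, and defers the extra rds_message_put() until every lock is dropped, since a put may free the message. The deferred-put pattern in miniature (illustrative context, mirroring the names above):

    int was_on_sock = 0;

    spin_lock_irqsave(&rm->m_rs_lock, flags);
    spin_lock(&rs->rs_lock);
    if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
        was_on_sock = 1;        /* note it; don't put under locks */
    spin_unlock(&rs->rs_lock);
    spin_unlock_irqrestore(&rm->m_rs_lock, flags);

    if (was_on_sock)
        rds_message_put(rm);    /* may free rm; safe only here */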
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index e08ec912d8b0..1aba6878fa5d 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -98,6 +98,7 @@ int rds_tcp_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
                 goto out;
             }
 
+            rds_stats_add(s_copy_to_user, to_copy);
             size -= to_copy;
             ret += to_copy;
             skb_off += to_copy;
diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 34fdcc059e54..a28b895ff0d1 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -240,7 +240,9 @@ void rds_tcp_write_space(struct sock *sk)
     tc->t_last_seen_una = rds_tcp_snd_una(tc);
     rds_send_drop_acked(conn, rds_tcp_snd_una(tc), rds_tcp_is_acked);
 
-    queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+    if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
+        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
+
 out:
     read_unlock(&sk->sk_callback_lock);
 
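The guard keeps rds_tcp_write_space() from re-queueing send work on every partial ACK: work is scheduled only once in-flight bytes fit in half the send buffer, i.e. 2 * sk_wmem_alloc <= sk_sndbuf. Spelled out without the shift:

    /* equivalent predicate to the condition above */
    static int half_drained(unsigned int wmem_alloc, unsigned int sndbuf)
    {
        return 2u * wmem_alloc <= sndbuf;    /* same as (wmem_alloc << 1) <= sndbuf */
    }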
diff --git a/net/rds/threads.c b/net/rds/threads.c
index 00fa10e59af8..786c20eaaf5e 100644
--- a/net/rds/threads.c
+++ b/net/rds/threads.c
@@ -259,7 +259,7 @@ void rds_threads_exit(void)
 
 int __init rds_threads_init(void)
 {
-    rds_wq = create_singlethread_workqueue("krdsd");
+    rds_wq = create_workqueue("krdsd");
     if (rds_wq == NULL)
         return -ENOMEM;
 
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index a9fa86f65983..51875a0c5d48 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -629,6 +629,49 @@ static ssize_t rfkill_persistent_show(struct device *dev,
     return sprintf(buf, "%d\n", rfkill->persistent);
 }
 
+static ssize_t rfkill_hard_show(struct device *dev,
+                struct device_attribute *attr,
+                char *buf)
+{
+    struct rfkill *rfkill = to_rfkill(dev);
+
+    return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 );
+}
+
+static ssize_t rfkill_soft_show(struct device *dev,
+                struct device_attribute *attr,
+                char *buf)
+{
+    struct rfkill *rfkill = to_rfkill(dev);
+
+    return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 );
+}
+
+static ssize_t rfkill_soft_store(struct device *dev,
+                 struct device_attribute *attr,
+                 const char *buf, size_t count)
+{
+    struct rfkill *rfkill = to_rfkill(dev);
+    unsigned long state;
+    int err;
+
+    if (!capable(CAP_NET_ADMIN))
+        return -EPERM;
+
+    err = strict_strtoul(buf, 0, &state);
+    if (err)
+        return err;
+
+    if (state > 1 )
+        return -EINVAL;
+
+    mutex_lock(&rfkill_global_mutex);
+    rfkill_set_block(rfkill, state);
+    mutex_unlock(&rfkill_global_mutex);
+
+    return err ?: count;
+}
+
 static u8 user_state_from_blocked(unsigned long state)
 {
     if (state & RFKILL_BLOCK_HW)
@@ -644,14 +687,8 @@ static ssize_t rfkill_state_show(struct device *dev,
                  char *buf)
 {
     struct rfkill *rfkill = to_rfkill(dev);
-    unsigned long flags;
-    u32 state;
-
-    spin_lock_irqsave(&rfkill->lock, flags);
-    state = rfkill->state;
-    spin_unlock_irqrestore(&rfkill->lock, flags);
 
-    return sprintf(buf, "%d\n", user_state_from_blocked(state));
+    return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state));
 }
 
 static ssize_t rfkill_state_store(struct device *dev,
@@ -701,6 +738,8 @@ static struct device_attribute rfkill_dev_attrs[] = {
     __ATTR(persistent, S_IRUGO, rfkill_persistent_show, NULL),
     __ATTR(state, S_IRUGO|S_IWUSR, rfkill_state_show, rfkill_state_store),
     __ATTR(claim, S_IRUGO|S_IWUSR, rfkill_claim_show, rfkill_claim_store),
+    __ATTR(soft, S_IRUGO|S_IWUSR, rfkill_soft_show, rfkill_soft_store),
+    __ATTR(hard, S_IRUGO, rfkill_hard_show, NULL),
     __ATTR_NULL
 };
 
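The new `soft` and `hard` attributes expose the two block sources separately (the sysfs-class-rfkill ABI files in the diffstat document them); `soft` is writable, `hard` is read-only. A user-space sketch, assuming a device registered as rfkill0; the path and stdio-based write are illustrative:

    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/class/rfkill/rfkill0/soft", "r+");
        int v;

        if (!f)
            return 1;
        v = fgetc(f);            /* '0' = unblocked, '1' = soft-blocked */
        printf("soft block: %c\n", v);
        rewind(f);
        fputc('1', f);           /* soft-block the radio; needs CAP_NET_ADMIN */
        return fclose(f) ? 1 : 0;
    }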
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 4fb711a035f4..8e45e76a95f5 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -845,7 +845,7 @@ rose_try_next_neigh:
         DEFINE_WAIT(wait);
 
         for (;;) {
-            prepare_to_wait(sk->sk_sleep, &wait,
+            prepare_to_wait(sk_sleep(sk), &wait,
                     TASK_INTERRUPTIBLE);
             if (sk->sk_state != TCP_SYN_SENT)
                 break;
@@ -858,7 +858,7 @@ rose_try_next_neigh:
             err = -ERESTARTSYS;
             break;
         }
-        finish_wait(sk->sk_sleep, &wait);
+        finish_wait(sk_sleep(sk), &wait);
 
         if (err)
             goto out_release;
@@ -911,7 +911,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
      * hooked into the SABM we saved
      */
     for (;;) {
-        prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+        prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 
         skb = skb_dequeue(&sk->sk_receive_queue);
         if (skb)
@@ -930,7 +930,7 @@ static int rose_accept(struct socket *sock, struct socket *newsock, int flags)
             err = -ERESTARTSYS;
             break;
         }
-        finish_wait(sk->sk_sleep, &wait);
+        finish_wait(sk_sleep(sk), &wait);
         if (err)
             goto out_release;
 
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index c060095b27ce..c432d76f415e 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -65,7 +65,7 @@ static void rxrpc_write_space(struct sock *sk)
     read_lock(&sk->sk_callback_lock);
     if (rxrpc_writable(sk)) {
         if (sk_has_sleeper(sk))
-            wake_up_interruptible(sk->sk_sleep);
+            wake_up_interruptible(sk_sleep(sk));
         sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
     }
     read_unlock(&sk->sk_callback_lock);
@@ -589,7 +589,7 @@ static unsigned int rxrpc_poll(struct file *file, struct socket *sock,
     unsigned int mask;
     struct sock *sk = sock->sk;
 
-    sock_poll_wait(file, sk->sk_sleep, wait);
+    sock_poll_wait(file, sk_sleep(sk), wait);
     mask = 0;
 
     /* the socket is readable if there are any messages waiting on the Rx
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index d8e0171d9a4b..019045174fc3 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -668,7 +668,8 @@ nlmsg_failure:
 }
 
 static int
-act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
+act_get_notify(struct net *net, u32 pid, struct nlmsghdr *n,
+           struct tc_action *a, int event)
 {
     struct sk_buff *skb;
 
@@ -680,7 +681,7 @@ act_get_notify(u32 pid, struct nlmsghdr *n, struct tc_action *a, int event)
         return -EINVAL;
     }
 
-    return rtnl_unicast(skb, &init_net, pid);
+    return rtnl_unicast(skb, net, pid);
 }
 
 static struct tc_action *
@@ -750,7 +751,8 @@ static struct tc_action *create_a(int i)
     return act;
 }
 
-static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
+static int tca_action_flush(struct net *net, struct nlattr *nla,
+                struct nlmsghdr *n, u32 pid)
 {
     struct sk_buff *skb;
     unsigned char *b;
@@ -809,7 +811,7 @@ static int tca_action_flush(struct nlattr *nla, struct nlmsghdr *n, u32 pid)
     nlh->nlmsg_flags |= NLM_F_ROOT;
     module_put(a->ops->owner);
     kfree(a);
-    err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
+    err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
     if (err > 0)
         return 0;
 
@@ -826,7 +828,8 @@ noflush_out:
 }
 
 static int
-tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
+tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+          u32 pid, int event)
 {
     int i, ret;
     struct nlattr *tb[TCA_ACT_MAX_PRIO+1];
@@ -838,7 +841,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
 
     if (event == RTM_DELACTION && n->nlmsg_flags&NLM_F_ROOT) {
         if (tb[1] != NULL)
-            return tca_action_flush(tb[1], n, pid);
+            return tca_action_flush(net, tb[1], n, pid);
         else
             return -EINVAL;
     }
@@ -859,7 +862,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
     }
 
     if (event == RTM_GETACTION)
-        ret = act_get_notify(pid, n, head, event);
+        ret = act_get_notify(net, pid, n, head, event);
     else { /* delete */
         struct sk_buff *skb;
 
@@ -878,7 +881,7 @@ tca_action_gd(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int event)
 
         /* now do the delete */
         tcf_action_destroy(head, 0);
-        ret = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC,
+        ret = rtnetlink_send(skb, net, pid, RTNLGRP_TC,
                      n->nlmsg_flags&NLM_F_ECHO);
         if (ret > 0)
             return 0;
@@ -889,8 +892,8 @@ err:
     return ret;
 }
 
-static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
-              u16 flags)
+static int tcf_add_notify(struct net *net, struct tc_action *a,
+              u32 pid, u32 seq, int event, u16 flags)
 {
     struct tcamsg *t;
     struct nlmsghdr *nlh;
@@ -923,7 +926,7 @@ static int tcf_add_notify(struct tc_action *a, u32 pid, u32 seq, int event,
     nlh->nlmsg_len = skb_tail_pointer(skb) - b;
     NETLINK_CB(skb).dst_group = RTNLGRP_TC;
 
-    err = rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
+    err = rtnetlink_send(skb, net, pid, RTNLGRP_TC, flags&NLM_F_ECHO);
     if (err > 0)
         err = 0;
     return err;
@@ -936,7 +939,8 @@ nlmsg_failure:
 
 
 static int
-tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
+tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
+           u32 pid, int ovr)
 {
     int ret = 0;
     struct tc_action *act;
@@ -954,7 +958,7 @@ tcf_action_add(struct nlattr *nla, struct nlmsghdr *n, u32 pid, int ovr)
     /* dump then free all the actions after update; inserted policy
      * stays intact
      * */
-    ret = tcf_add_notify(act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
+    ret = tcf_add_notify(net, act, pid, seq, RTM_NEWACTION, n->nlmsg_flags);
     for (a = act; a; a = act) {
         act = a->next;
         kfree(a);
@@ -970,9 +974,6 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
     u32 pid = skb ? NETLINK_CB(skb).pid : 0;
     int ret = 0, ovr = 0;
 
-    if (!net_eq(net, &init_net))
-        return -EINVAL;
-
     ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
     if (ret < 0)
         return ret;
@@ -995,15 +996,17 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
         if (n->nlmsg_flags&NLM_F_REPLACE)
             ovr = 1;
 replay:
-        ret = tcf_action_add(tca[TCA_ACT_TAB], n, pid, ovr);
+        ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, pid, ovr);
         if (ret == -EAGAIN)
             goto replay;
         break;
     case RTM_DELACTION:
-        ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_DELACTION);
+        ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
+                    pid, RTM_DELACTION);
         break;
     case RTM_GETACTION:
-        ret = tca_action_gd(tca[TCA_ACT_TAB], n, pid, RTM_GETACTION);
+        ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
+                    pid, RTM_GETACTION);
         break;
     default:
         BUG();
@@ -1043,7 +1046,6 @@ find_dump_kind(const struct nlmsghdr *n)
 static int
 tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
 {
-    struct net *net = sock_net(skb->sk);
     struct nlmsghdr *nlh;
     unsigned char *b = skb_tail_pointer(skb);
     struct nlattr *nest;
@@ -1053,9 +1055,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
     struct tcamsg *t = (struct tcamsg *) NLMSG_DATA(cb->nlh);
     struct nlattr *kind = find_dump_kind(cb->nlh);
 
-    if (!net_eq(net, &init_net))
-        return 0;
-
     if (kind == NULL) {
         printk("tc_dump_action: action bad kind\n");
         return 0;
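This act_api.c block, together with the cls_api.c and sch_api.c changes below, is one pattern applied across the tc layer: thread `struct net *` from the netlink handler into every notifier instead of hard-coding `&init_net`, then delete the `net_eq(net, &init_net)` bail-outs that made actions and filters init-namespace-only. In miniature (handler and callee names below are illustrative, not functions from this patch):

    static int my_doit(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
    {
        struct net *net = sock_net(skb->sk);    /* requester's namespace */

        /* was: if (!net_eq(net, &init_net)) return -EINVAL; */
        return my_notify(net, skb, n);          /* callees now take net */
    }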
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index f082b27ff46d..5fd0c28ef79a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -99,8 +99,9 @@ out:
99} 99}
100EXPORT_SYMBOL(unregister_tcf_proto_ops); 100EXPORT_SYMBOL(unregister_tcf_proto_ops);
101 101
102static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, 102static int tfilter_notify(struct net *net, struct sk_buff *oskb,
103 struct tcf_proto *tp, unsigned long fh, int event); 103 struct nlmsghdr *n, struct tcf_proto *tp,
104 unsigned long fh, int event);
104 105
105 106
106/* Select new prio value from the range, managed by kernel. */ 107/* Select new prio value from the range, managed by kernel. */
@@ -138,9 +139,6 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
138 int err; 139 int err;
139 int tp_created = 0; 140 int tp_created = 0;
140 141
141 if (!net_eq(net, &init_net))
142 return -EINVAL;
143
144replay: 142replay:
145 t = NLMSG_DATA(n); 143 t = NLMSG_DATA(n);
146 protocol = TC_H_MIN(t->tcm_info); 144 protocol = TC_H_MIN(t->tcm_info);
@@ -159,7 +157,7 @@ replay:
159 /* Find head of filter chain. */ 157 /* Find head of filter chain. */
160 158
161 /* Find link */ 159 /* Find link */
162 dev = __dev_get_by_index(&init_net, t->tcm_ifindex); 160 dev = __dev_get_by_index(net, t->tcm_ifindex);
163 if (dev == NULL) 161 if (dev == NULL)
164 return -ENODEV; 162 return -ENODEV;
165 163
@@ -283,7 +281,7 @@ replay:
283 *back = tp->next; 281 *back = tp->next;
284 spin_unlock_bh(root_lock); 282 spin_unlock_bh(root_lock);
285 283
286 tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); 284 tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
287 tcf_destroy(tp); 285 tcf_destroy(tp);
288 err = 0; 286 err = 0;
289 goto errout; 287 goto errout;
@@ -306,10 +304,10 @@ replay:
306 case RTM_DELTFILTER: 304 case RTM_DELTFILTER:
307 err = tp->ops->delete(tp, fh); 305 err = tp->ops->delete(tp, fh);
308 if (err == 0) 306 if (err == 0)
309 tfilter_notify(skb, n, tp, fh, RTM_DELTFILTER); 307 tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
310 goto errout; 308 goto errout;
311 case RTM_GETTFILTER: 309 case RTM_GETTFILTER:
312 err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); 310 err = tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
313 goto errout; 311 goto errout;
314 default: 312 default:
315 err = -EINVAL; 313 err = -EINVAL;
@@ -325,7 +323,7 @@ replay:
325 *back = tp; 323 *back = tp;
326 spin_unlock_bh(root_lock); 324 spin_unlock_bh(root_lock);
327 } 325 }
328 tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER); 326 tfilter_notify(net, skb, n, tp, fh, RTM_NEWTFILTER);
329 } else { 327 } else {
330 if (tp_created) 328 if (tp_created)
331 tcf_destroy(tp); 329 tcf_destroy(tp);
@@ -371,8 +369,9 @@ nla_put_failure:
371 return -1; 369 return -1;
372} 370}
373 371
374static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n, 372static int tfilter_notify(struct net *net, struct sk_buff *oskb,
375 struct tcf_proto *tp, unsigned long fh, int event) 373 struct nlmsghdr *n, struct tcf_proto *tp,
374 unsigned long fh, int event)
376{ 375{
377 struct sk_buff *skb; 376 struct sk_buff *skb;
378 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 377 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -386,7 +385,7 @@ static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
386 return -EINVAL; 385 return -EINVAL;
387 } 386 }
388 387
389 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, 388 return rtnetlink_send(skb, net, pid, RTNLGRP_TC,
390 n->nlmsg_flags & NLM_F_ECHO); 389 n->nlmsg_flags & NLM_F_ECHO);
391} 390}
392 391
@@ -419,12 +418,9 @@ static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
419 const struct Qdisc_class_ops *cops; 418 const struct Qdisc_class_ops *cops;
420 struct tcf_dump_args arg; 419 struct tcf_dump_args arg;
421 420
422 if (!net_eq(net, &init_net))
423 return 0;
424
425 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) 421 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
426 return skb->len; 422 return skb->len;
427 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 423 if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
428 return skb->len; 424 return skb->len;
429 425
430 if (!tcm->tcm_parent) 426 if (!tcm->tcm_parent)
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 17c5dfc67320..593eac056e8d 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -773,10 +773,10 @@ static int __init init_u32(void)
773 printk(" Performance counters on\n"); 773 printk(" Performance counters on\n");
774#endif 774#endif
775#ifdef CONFIG_NET_CLS_IND 775#ifdef CONFIG_NET_CLS_IND
776 printk(" input device check on \n"); 776 printk(" input device check on\n");
777#endif 777#endif
778#ifdef CONFIG_NET_CLS_ACT 778#ifdef CONFIG_NET_CLS_ACT
779 printk(" Actions configured \n"); 779 printk(" Actions configured\n");
780#endif 780#endif
781 return register_tcf_proto_ops(&cls_u32_ops); 781 return register_tcf_proto_ops(&cls_u32_ops);
782} 782}
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 145268ca57cf..9839b26674f4 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -35,10 +35,12 @@
35#include <net/netlink.h> 35#include <net/netlink.h>
36#include <net/pkt_sched.h> 36#include <net/pkt_sched.h>
37 37
38static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid, 38static int qdisc_notify(struct net *net, struct sk_buff *oskb,
39 struct nlmsghdr *n, u32 clid,
39 struct Qdisc *old, struct Qdisc *new); 40 struct Qdisc *old, struct Qdisc *new);
40static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, 41static int tclass_notify(struct net *net, struct sk_buff *oskb,
41 struct Qdisc *q, unsigned long cl, int event); 42 struct nlmsghdr *n, struct Qdisc *q,
43 unsigned long cl, int event);
42 44
43/* 45/*
44 46
@@ -639,11 +641,12 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
639} 641}
640EXPORT_SYMBOL(qdisc_tree_decrease_qlen); 642EXPORT_SYMBOL(qdisc_tree_decrease_qlen);
641 643
642static void notify_and_destroy(struct sk_buff *skb, struct nlmsghdr *n, u32 clid, 644static void notify_and_destroy(struct net *net, struct sk_buff *skb,
645 struct nlmsghdr *n, u32 clid,
643 struct Qdisc *old, struct Qdisc *new) 646 struct Qdisc *old, struct Qdisc *new)
644{ 647{
645 if (new || old) 648 if (new || old)
646 qdisc_notify(skb, n, clid, old, new); 649 qdisc_notify(net, skb, n, clid, old, new);
647 650
648 if (old) 651 if (old)
649 qdisc_destroy(old); 652 qdisc_destroy(old);
@@ -663,6 +666,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
663 struct Qdisc *new, struct Qdisc *old) 666 struct Qdisc *new, struct Qdisc *old)
664{ 667{
665 struct Qdisc *q = old; 668 struct Qdisc *q = old;
669 struct net *net = dev_net(dev);
666 int err = 0; 670 int err = 0;
667 671
668 if (parent == NULL) { 672 if (parent == NULL) {
@@ -699,12 +703,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
699 } 703 }
700 704
701 if (!ingress) { 705 if (!ingress) {
702 notify_and_destroy(skb, n, classid, dev->qdisc, new); 706 notify_and_destroy(net, skb, n, classid,
707 dev->qdisc, new);
703 if (new && !new->ops->attach) 708 if (new && !new->ops->attach)
704 atomic_inc(&new->refcnt); 709 atomic_inc(&new->refcnt);
705 dev->qdisc = new ? : &noop_qdisc; 710 dev->qdisc = new ? : &noop_qdisc;
706 } else { 711 } else {
707 notify_and_destroy(skb, n, classid, old, new); 712 notify_and_destroy(net, skb, n, classid, old, new);
708 } 713 }
709 714
710 if (dev->flags & IFF_UP) 715 if (dev->flags & IFF_UP)
@@ -722,7 +727,7 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
722 err = -ENOENT; 727 err = -ENOENT;
723 } 728 }
724 if (!err) 729 if (!err)
725 notify_and_destroy(skb, n, classid, old, new); 730 notify_and_destroy(net, skb, n, classid, old, new);
726 } 731 }
727 return err; 732 return err;
728} 733}
@@ -948,10 +953,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
948 struct Qdisc *p = NULL; 953 struct Qdisc *p = NULL;
949 int err; 954 int err;
950 955
951 if (!net_eq(net, &init_net)) 956 if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
952 return -EINVAL;
953
954 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
955 return -ENODEV; 957 return -ENODEV;
956 958
957 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 959 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -991,7 +993,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
991 if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0) 993 if ((err = qdisc_graft(dev, p, skb, n, clid, NULL, q)) != 0)
992 return err; 994 return err;
993 } else { 995 } else {
994 qdisc_notify(skb, n, clid, NULL, q); 996 qdisc_notify(net, skb, n, clid, NULL, q);
995 } 997 }
996 return 0; 998 return 0;
997} 999}
@@ -1010,16 +1012,13 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1010 struct Qdisc *q, *p; 1012 struct Qdisc *q, *p;
1011 int err; 1013 int err;
1012 1014
1013 if (!net_eq(net, &init_net))
1014 return -EINVAL;
1015
1016replay: 1015replay:
1017 /* Reinit, just in case something touches this. */ 1016 /* Reinit, just in case something touches this. */
1018 tcm = NLMSG_DATA(n); 1017 tcm = NLMSG_DATA(n);
1019 clid = tcm->tcm_parent; 1018 clid = tcm->tcm_parent;
1020 q = p = NULL; 1019 q = p = NULL;
1021 1020
1022 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 1021 if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
1023 return -ENODEV; 1022 return -ENODEV;
1024 1023
1025 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 1024 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1106,7 +1105,7 @@ replay:
1106 return -EINVAL; 1105 return -EINVAL;
1107 err = qdisc_change(q, tca); 1106 err = qdisc_change(q, tca);
1108 if (err == 0) 1107 if (err == 0)
1109 qdisc_notify(skb, n, clid, NULL, q); 1108 qdisc_notify(net, skb, n, clid, NULL, q);
1110 return err; 1109 return err;
1111 1110
1112create_n_graft: 1111create_n_graft:
@@ -1196,8 +1195,9 @@ nla_put_failure:
1196 return -1; 1195 return -1;
1197} 1196}
1198 1197
1199static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, 1198static int qdisc_notify(struct net *net, struct sk_buff *oskb,
1200 u32 clid, struct Qdisc *old, struct Qdisc *new) 1199 struct nlmsghdr *n, u32 clid,
1200 struct Qdisc *old, struct Qdisc *new)
1201{ 1201{
1202 struct sk_buff *skb; 1202 struct sk_buff *skb;
1203 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 1203 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -1216,7 +1216,7 @@ static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n,
1216 } 1216 }
1217 1217
1218 if (skb->len) 1218 if (skb->len)
1219 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 1219 return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
1220 1220
1221err_out: 1221err_out:
1222 kfree_skb(skb); 1222 kfree_skb(skb);
@@ -1275,15 +1275,12 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
1275 int s_idx, s_q_idx; 1275 int s_idx, s_q_idx;
1276 struct net_device *dev; 1276 struct net_device *dev;
1277 1277
1278 if (!net_eq(net, &init_net))
1279 return 0;
1280
1281 s_idx = cb->args[0]; 1278 s_idx = cb->args[0];
1282 s_q_idx = q_idx = cb->args[1]; 1279 s_q_idx = q_idx = cb->args[1];
1283 1280
1284 rcu_read_lock(); 1281 rcu_read_lock();
1285 idx = 0; 1282 idx = 0;
1286 for_each_netdev_rcu(&init_net, dev) { 1283 for_each_netdev_rcu(net, dev) {
1287 struct netdev_queue *dev_queue; 1284 struct netdev_queue *dev_queue;
1288 1285
1289 if (idx < s_idx) 1286 if (idx < s_idx)
@@ -1335,10 +1332,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1335 u32 qid = TC_H_MAJ(clid); 1332 u32 qid = TC_H_MAJ(clid);
1336 int err; 1333 int err;
1337 1334
1338 if (!net_eq(net, &init_net)) 1335 if ((dev = __dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
1339 return -EINVAL;
1340
1341 if ((dev = __dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL)
1342 return -ENODEV; 1336 return -ENODEV;
1343 1337
1344 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL); 1338 err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1419,10 +1413,10 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1419 if (cops->delete) 1413 if (cops->delete)
1420 err = cops->delete(q, cl); 1414 err = cops->delete(q, cl);
1421 if (err == 0) 1415 if (err == 0)
1422 tclass_notify(skb, n, q, cl, RTM_DELTCLASS); 1416 tclass_notify(net, skb, n, q, cl, RTM_DELTCLASS);
1423 goto out; 1417 goto out;
1424 case RTM_GETTCLASS: 1418 case RTM_GETTCLASS:
1425 err = tclass_notify(skb, n, q, cl, RTM_NEWTCLASS); 1419 err = tclass_notify(net, skb, n, q, cl, RTM_NEWTCLASS);
1426 goto out; 1420 goto out;
1427 default: 1421 default:
1428 err = -EINVAL; 1422 err = -EINVAL;
@@ -1435,7 +1429,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
1435 if (cops->change) 1429 if (cops->change)
1436 err = cops->change(q, clid, pid, tca, &new_cl); 1430 err = cops->change(q, clid, pid, tca, &new_cl);
1437 if (err == 0) 1431 if (err == 0)
1438 tclass_notify(skb, n, q, new_cl, RTM_NEWTCLASS); 1432 tclass_notify(net, skb, n, q, new_cl, RTM_NEWTCLASS);
1439 1433
1440out: 1434out:
1441 if (cl) 1435 if (cl)
@@ -1487,8 +1481,9 @@ nla_put_failure:
1487 return -1; 1481 return -1;
1488} 1482}
1489 1483
1490static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n, 1484static int tclass_notify(struct net *net, struct sk_buff *oskb,
1491 struct Qdisc *q, unsigned long cl, int event) 1485 struct nlmsghdr *n, struct Qdisc *q,
1486 unsigned long cl, int event)
1492{ 1487{
1493 struct sk_buff *skb; 1488 struct sk_buff *skb;
1494 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0; 1489 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
@@ -1502,7 +1497,7 @@ static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
1502 return -EINVAL; 1497 return -EINVAL;
1503 } 1498 }
1504 1499
1505 return rtnetlink_send(skb, &init_net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO); 1500 return rtnetlink_send(skb, net, pid, RTNLGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
1506} 1501}
1507 1502
1508struct qdisc_dump_args 1503struct qdisc_dump_args
@@ -1577,12 +1572,9 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
1577 struct net_device *dev; 1572 struct net_device *dev;
1578 int t, s_t; 1573 int t, s_t;
1579 1574
1580 if (!net_eq(net, &init_net))
1581 return 0;
1582
1583 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm))) 1575 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
1584 return 0; 1576 return 0;
1585 if ((dev = dev_get_by_index(&init_net, tcm->tcm_ifindex)) == NULL) 1577 if ((dev = dev_get_by_index(net, tcm->tcm_ifindex)) == NULL)
1586 return 0; 1578 return 0;
1587 1579
1588 s_t = cb->args[0]; 1580 s_t = cb->args[0];
@@ -1692,7 +1684,7 @@ static int psched_show(struct seq_file *seq, void *v)
1692 1684
1693static int psched_open(struct inode *inode, struct file *file) 1685static int psched_open(struct inode *inode, struct file *file)
1694{ 1686{
1695 return single_open(file, psched_show, PDE(inode)->data); 1687 return single_open(file, psched_show, NULL);
1696} 1688}
1697 1689
1698static const struct file_operations psched_fops = { 1690static const struct file_operations psched_fops = {
@@ -1702,15 +1694,53 @@ static const struct file_operations psched_fops = {
1702 .llseek = seq_lseek, 1694 .llseek = seq_lseek,
1703 .release = single_release, 1695 .release = single_release,
1704}; 1696};
1697
1698static int __net_init psched_net_init(struct net *net)
1699{
1700 struct proc_dir_entry *e;
1701
1702 e = proc_net_fops_create(net, "psched", 0, &psched_fops);
1703 if (e == NULL)
1704 return -ENOMEM;
1705
1706 return 0;
1707}
1708
1709static void __net_exit psched_net_exit(struct net *net)
1710{
1711 proc_net_remove(net, "psched");
1712}
1713#else
1714static int __net_init psched_net_init(struct net *net)
1715{
1716 return 0;
1717}
1718
1719static void __net_exit psched_net_exit(struct net *net)
1720{
1721}
1705#endif 1722#endif
1706 1723
1724static struct pernet_operations psched_net_ops = {
1725 .init = psched_net_init,
1726 .exit = psched_net_exit,
1727};
1728
1707static int __init pktsched_init(void) 1729static int __init pktsched_init(void)
1708{ 1730{
1731 int err;
1732
1733 err = register_pernet_subsys(&psched_net_ops);
1734 if (err) {
1735 printk(KERN_ERR "pktsched_init: "
1736 "cannot initialize per netns operations\n");
1737 return err;
1738 }
1739
1709 register_qdisc(&pfifo_qdisc_ops); 1740 register_qdisc(&pfifo_qdisc_ops);
1710 register_qdisc(&bfifo_qdisc_ops); 1741 register_qdisc(&bfifo_qdisc_ops);
1711 register_qdisc(&pfifo_head_drop_qdisc_ops); 1742 register_qdisc(&pfifo_head_drop_qdisc_ops);
1712 register_qdisc(&mq_qdisc_ops); 1743 register_qdisc(&mq_qdisc_ops);
1713 proc_net_fops_create(&init_net, "psched", 0, &psched_fops);
1714 1744
1715 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL); 1745 rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL);
1716 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL); 1746 rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL);
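
Note: with the handlers namespace-aware, /proc/net/psched also becomes per-netns: the single proc_net_fops_create(&init_net, ...) call in pktsched_init() is replaced by a pernet_operations pair whose init/exit hooks run once for every namespace created or destroyed. The generic shape of such a subsystem, as a sketch (the example_* names are placeholders):

#include <linux/proc_fs.h>
#include <net/net_namespace.h>

static const struct file_operations example_fops;   /* seq_file ops elided */

static int __net_init example_net_init(struct net *net)
{
	if (!proc_net_fops_create(net, "example", 0, &example_fops))
		return -ENOMEM;
	return 0;
}

static void __net_exit example_net_exit(struct net *net)
{
	proc_net_remove(net, "example");
}

static struct pernet_operations example_net_ops = {
	.init = example_net_init,
	.exit = example_net_exit,
};

/* registered once, e.g. from an __init function:
 *	err = register_pernet_subsys(&example_net_ops);
 */
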
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index ff4dd53eeff0..aeddabfb8e4e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -529,7 +529,7 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
529 unsigned int size; 529 unsigned int size;
530 int err = -ENOBUFS; 530 int err = -ENOBUFS;
531 531
532 /* ensure that the Qdisc and the private data are 32-byte aligned */ 532 /* ensure that the Qdisc and the private data are 64-byte aligned */
533 size = QDISC_ALIGN(sizeof(*sch)); 533 size = QDISC_ALIGN(sizeof(*sch));
534 size += ops->priv_size + (QDISC_ALIGNTO - 1); 534 size += ops->priv_size + (QDISC_ALIGNTO - 1);
535 535
@@ -591,6 +591,13 @@ void qdisc_reset(struct Qdisc *qdisc)
591} 591}
592EXPORT_SYMBOL(qdisc_reset); 592EXPORT_SYMBOL(qdisc_reset);
593 593
594static void qdisc_rcu_free(struct rcu_head *head)
595{
596 struct Qdisc *qdisc = container_of(head, struct Qdisc, rcu_head);
597
598 kfree((char *) qdisc - qdisc->padded);
599}
600
594void qdisc_destroy(struct Qdisc *qdisc) 601void qdisc_destroy(struct Qdisc *qdisc)
595{ 602{
596 const struct Qdisc_ops *ops = qdisc->ops; 603 const struct Qdisc_ops *ops = qdisc->ops;
@@ -614,7 +621,11 @@ void qdisc_destroy(struct Qdisc *qdisc)
614 dev_put(qdisc_dev(qdisc)); 621 dev_put(qdisc_dev(qdisc));
615 622
616 kfree_skb(qdisc->gso_skb); 623 kfree_skb(qdisc->gso_skb);
617 kfree((char *) qdisc - qdisc->padded); 624 /*
625 * gen_estimator est_timer() might access qdisc->q.lock,
626 * wait a RCU grace period before freeing qdisc.
627 */
628 call_rcu(&qdisc->rcu_head, qdisc_rcu_free);
618} 629}
619EXPORT_SYMBOL(qdisc_destroy); 630EXPORT_SYMBOL(qdisc_destroy);
620 631
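
Note: the sch_generic.c change defers freeing a qdisc by one RCU grace period because the rate-estimator timer may still take qdisc->q.lock after qdisc_destroy() has run. The pattern in isolation (a sketch; struct obj is illustrative):

#include <linux/kernel.h>	/* container_of() */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct obj {
	int payload;
	struct rcu_head rcu;	/* embedded for deferred freeing */
};

static void obj_rcu_free(struct rcu_head *head)
{
	struct obj *o = container_of(head, struct obj, rcu);

	kfree(o);
}

static void obj_destroy(struct obj *o)
{
	/* memory is released only after every RCU read-side critical
	 * section that could still see the object has finished */
	call_rcu(&o->rcu, obj_rcu_free);
}
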
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index c5a9ac566007..c65762823f5e 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -123,8 +123,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
123 case htons(ETH_P_IP): 123 case htons(ETH_P_IP):
124 { 124 {
125 const struct iphdr *iph = ip_hdr(skb); 125 const struct iphdr *iph = ip_hdr(skb);
126 h = iph->daddr; 126 h = (__force u32)iph->daddr;
127 h2 = iph->saddr ^ iph->protocol; 127 h2 = (__force u32)iph->saddr ^ iph->protocol;
128 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) && 128 if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
129 (iph->protocol == IPPROTO_TCP || 129 (iph->protocol == IPPROTO_TCP ||
130 iph->protocol == IPPROTO_UDP || 130 iph->protocol == IPPROTO_UDP ||
@@ -138,8 +138,8 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
138 case htons(ETH_P_IPV6): 138 case htons(ETH_P_IPV6):
139 { 139 {
140 struct ipv6hdr *iph = ipv6_hdr(skb); 140 struct ipv6hdr *iph = ipv6_hdr(skb);
141 h = iph->daddr.s6_addr32[3]; 141 h = (__force u32)iph->daddr.s6_addr32[3];
142 h2 = iph->saddr.s6_addr32[3] ^ iph->nexthdr; 142 h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
143 if (iph->nexthdr == IPPROTO_TCP || 143 if (iph->nexthdr == IPPROTO_TCP ||
144 iph->nexthdr == IPPROTO_UDP || 144 iph->nexthdr == IPPROTO_UDP ||
145 iph->nexthdr == IPPROTO_UDPLITE || 145 iph->nexthdr == IPPROTO_UDPLITE ||
@@ -150,7 +150,7 @@ static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
150 break; 150 break;
151 } 151 }
152 default: 152 default:
153 h = (unsigned long)skb_dst(skb) ^ skb->protocol; 153 h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
154 h2 = (unsigned long)skb->sk; 154 h2 = (unsigned long)skb->sk;
155 } 155 }
156 156
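
Note: the sch_sfq.c casts silence sparse, not the compiler: iph->daddr is __be32, a restricted "bitwise" type, and folding it into a plain u32 normally draws a warning. Because the value only seeds a hash, byte order does not matter, and (__force u32) records that the reinterpretation is deliberate rather than a missing ntohl(). A minimal sketch:

#include <linux/types.h>

static u32 hash_seed(__be32 daddr, __be32 saddr, u8 protocol)
{
	/* no ntohl(): the bits feed a hash, so their order is irrelevant */
	return (__force u32)daddr ^ (__force u32)saddr ^ protocol;
}
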
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 9fb5d37c37ad..732689140fb8 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -232,7 +232,7 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
232 if (!(transport->param_flags & SPP_PMTUD_ENABLE)) 232 if (!(transport->param_flags & SPP_PMTUD_ENABLE))
233 skb->local_df = 1; 233 skb->local_df = 1;
234 234
235 return ip6_xmit(sk, skb, &fl, np->opt, 0); 235 return ip6_xmit(sk, skb, &fl, np->opt);
236} 236}
237 237
238/* Returns the dst cache entry for the given source and destination ip 238/* Returns the dst cache entry for the given source and destination ip
@@ -277,20 +277,7 @@ static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc,
277static inline int sctp_v6_addr_match_len(union sctp_addr *s1, 277static inline int sctp_v6_addr_match_len(union sctp_addr *s1,
278 union sctp_addr *s2) 278 union sctp_addr *s2)
279{ 279{
280 struct in6_addr *a1 = &s1->v6.sin6_addr; 280 return ipv6_addr_diff(&s1->v6.sin6_addr, &s2->v6.sin6_addr);
281 struct in6_addr *a2 = &s2->v6.sin6_addr;
282 int i, j;
283
284 for (i = 0; i < 4 ; i++) {
285 __be32 a1xora2;
286
287 a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i];
288
289 if ((j = fls(ntohl(a1xora2))))
290 return (i * 32 + 32 - j);
291 }
292
293 return (i*32);
294} 281}
295 282
296/* Fills in the source address(saddr) based on the destination address(daddr) 283/* Fills in the source address(saddr) based on the destination address(daddr)
@@ -372,13 +359,13 @@ static void sctp_v6_copy_addrlist(struct list_head *addrlist,
372 } 359 }
373 360
374 read_lock_bh(&in6_dev->lock); 361 read_lock_bh(&in6_dev->lock);
375 for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) { 362 list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
376 /* Add the address to the local list. */ 363 /* Add the address to the local list. */
377 addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); 364 addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC);
378 if (addr) { 365 if (addr) {
379 addr->a.v6.sin6_family = AF_INET6; 366 addr->a.v6.sin6_family = AF_INET6;
380 addr->a.v6.sin6_port = 0; 367 addr->a.v6.sin6_port = 0;
381 addr->a.v6.sin6_addr = ifp->addr; 368 ipv6_addr_copy(&addr->a.v6.sin6_addr, &ifp->addr);
382 addr->a.v6.sin6_scope_id = dev->ifindex; 369 addr->a.v6.sin6_scope_id = dev->ifindex;
383 addr->valid = 1; 370 addr->valid = 1;
384 INIT_LIST_HEAD(&addr->list); 371 INIT_LIST_HEAD(&addr->list);
@@ -419,7 +406,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
419{ 406{
420 addr->v6.sin6_family = AF_INET6; 407 addr->v6.sin6_family = AF_INET6;
421 addr->v6.sin6_port = 0; 408 addr->v6.sin6_port = 0;
422 addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr; 409 ipv6_addr_copy(&addr->v6.sin6_addr, &inet6_sk(sk)->rcv_saddr);
423} 410}
424 411
425/* Initialize sk->sk_rcv_saddr from sctp_addr. */ 412/* Initialize sk->sk_rcv_saddr from sctp_addr. */
@@ -432,7 +419,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
432 inet6_sk(sk)->rcv_saddr.s6_addr32[3] = 419 inet6_sk(sk)->rcv_saddr.s6_addr32[3] =
433 addr->v4.sin_addr.s_addr; 420 addr->v4.sin_addr.s_addr;
434 } else { 421 } else {
435 inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr; 422 ipv6_addr_copy(&inet6_sk(sk)->rcv_saddr, &addr->v6.sin6_addr);
436 } 423 }
437} 424}
438 425
@@ -445,7 +432,7 @@ static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
445 inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); 432 inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff);
446 inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; 433 inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
447 } else { 434 } else {
448 inet6_sk(sk)->daddr = addr->v6.sin6_addr; 435 ipv6_addr_copy(&inet6_sk(sk)->daddr, &addr->v6.sin6_addr);
449 } 436 }
450} 437}
451 438
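
Note: in sctp/ipv6.c the open-coded prefix scan collapses into ipv6_addr_diff(), which returns the index of the first differing bit, i.e. the common prefix length (0..128). That matches the removed arithmetic: for 2001:db8::1 vs 2001:db8::2 the XOR of the last words is 0x3, fls() gives 2, so i*32 + 32 - j = 96 + 30 = 126, exactly what the helper reports. Sketch:

#include <net/ipv6.h>

/* Equivalent of the deleted sctp_v6_addr_match_len() body. */
static int prefix_len(const struct in6_addr *a, const struct in6_addr *b)
{
	return ipv6_addr_diff(a, b);	/* 128 when the addresses match */
}
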
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index a56f98e82f92..704298f4b284 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -854,7 +854,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
854 IP_PMTUDISC_DO : IP_PMTUDISC_DONT; 854 IP_PMTUDISC_DO : IP_PMTUDISC_DONT;
855 855
856 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); 856 SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS);
857 return ip_queue_xmit(skb, 0); 857 return ip_queue_xmit(skb);
858} 858}
859 859
860static struct sctp_af sctp_af_inet; 860static struct sctp_af sctp_af_inet;
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 007e8baba089..f34adcca8a8c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5482,7 +5482,6 @@ pp_found:
5482 */ 5482 */
5483 int reuse = sk->sk_reuse; 5483 int reuse = sk->sk_reuse;
5484 struct sock *sk2; 5484 struct sock *sk2;
5485 struct hlist_node *node;
5486 5485
5487 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); 5486 SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n");
5488 if (pp->fastreuse && sk->sk_reuse && 5487 if (pp->fastreuse && sk->sk_reuse &&
@@ -5703,7 +5702,7 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
5703 struct sctp_sock *sp = sctp_sk(sk); 5702 struct sctp_sock *sp = sctp_sk(sk);
5704 unsigned int mask; 5703 unsigned int mask;
5705 5704
5706 poll_wait(file, sk->sk_sleep, wait); 5705 poll_wait(file, sk_sleep(sk), wait);
5707 5706
5708 /* A TCP-style listening socket becomes readable when the accept queue 5707 /* A TCP-style listening socket becomes readable when the accept queue
5709 * is not empty. 5708 * is not empty.
@@ -5944,7 +5943,7 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
5944 int error; 5943 int error;
5945 DEFINE_WAIT(wait); 5944 DEFINE_WAIT(wait);
5946 5945
5947 prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 5946 prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
5948 5947
5949 /* Socket errors? */ 5948 /* Socket errors? */
5950 error = sock_error(sk); 5949 error = sock_error(sk);
@@ -5981,14 +5980,14 @@ static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p)
5981 sctp_lock_sock(sk); 5980 sctp_lock_sock(sk);
5982 5981
5983ready: 5982ready:
5984 finish_wait(sk->sk_sleep, &wait); 5983 finish_wait(sk_sleep(sk), &wait);
5985 return 0; 5984 return 0;
5986 5985
5987interrupted: 5986interrupted:
5988 error = sock_intr_errno(*timeo_p); 5987 error = sock_intr_errno(*timeo_p);
5989 5988
5990out: 5989out:
5991 finish_wait(sk->sk_sleep, &wait); 5990 finish_wait(sk_sleep(sk), &wait);
5992 *err = error; 5991 *err = error;
5993 return error; 5992 return error;
5994} 5993}
@@ -6062,8 +6061,8 @@ static void __sctp_write_space(struct sctp_association *asoc)
6062 wake_up_interruptible(&asoc->wait); 6061 wake_up_interruptible(&asoc->wait);
6063 6062
6064 if (sctp_writeable(sk)) { 6063 if (sctp_writeable(sk)) {
6065 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 6064 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
6066 wake_up_interruptible(sk->sk_sleep); 6065 wake_up_interruptible(sk_sleep(sk));
6067 6066
6068 /* Note that we try to include the Async I/O support 6067 /* Note that we try to include the Async I/O support
6069 * here by modeling from the current TCP/UDP code. 6068 * here by modeling from the current TCP/UDP code.
@@ -6297,7 +6296,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
6297 6296
6298 6297
6299 for (;;) { 6298 for (;;) {
6300 prepare_to_wait_exclusive(sk->sk_sleep, &wait, 6299 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
6301 TASK_INTERRUPTIBLE); 6300 TASK_INTERRUPTIBLE);
6302 6301
6303 if (list_empty(&ep->asocs)) { 6302 if (list_empty(&ep->asocs)) {
@@ -6323,7 +6322,7 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo)
6323 break; 6322 break;
6324 } 6323 }
6325 6324
6326 finish_wait(sk->sk_sleep, &wait); 6325 finish_wait(sk_sleep(sk), &wait);
6327 6326
6328 return err; 6327 return err;
6329} 6328}
@@ -6333,7 +6332,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
6333 DEFINE_WAIT(wait); 6332 DEFINE_WAIT(wait);
6334 6333
6335 do { 6334 do {
6336 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 6335 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
6337 if (list_empty(&sctp_sk(sk)->ep->asocs)) 6336 if (list_empty(&sctp_sk(sk)->ep->asocs))
6338 break; 6337 break;
6339 sctp_release_sock(sk); 6338 sctp_release_sock(sk);
@@ -6341,7 +6340,7 @@ static void sctp_wait_for_close(struct sock *sk, long timeout)
6341 sctp_lock_sock(sk); 6340 sctp_lock_sock(sk);
6342 } while (!signal_pending(current) && timeout); 6341 } while (!signal_pending(current) && timeout);
6343 6342
6344 finish_wait(sk->sk_sleep, &wait); 6343 finish_wait(sk_sleep(sk), &wait);
6345} 6344}
6346 6345
6347static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk) 6346static void sctp_skb_set_owner_r_frag(struct sk_buff *skb, struct sock *sk)
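
Note: the sweep through sctp/socket.c (and the sunrpc, tipc, and af_unix hunks below) is mechanical: every raw sk->sk_sleep dereference becomes sk_sleep(sk), so the wait queue's location inside struct sock can later change behind the accessor without touching each protocol again. The converted idiom, as a sketch:

#include <linux/wait.h>
#include <net/sock.h>

static void example_wake_pollers(struct sock *sk)
{
	wait_queue_head_t *wq = sk_sleep(sk);	/* never sk->sk_sleep */

	if (wq && waitqueue_active(wq))
		wake_up_interruptible(wq);
}
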
diff --git a/net/socket.c b/net/socket.c
index 5e8d0af3c0e7..35bc198bbf68 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -620,10 +620,9 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
620 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, 620 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
621 sizeof(tv), &tv); 621 sizeof(tv), &tv);
622 } else { 622 } else {
623 struct timespec ts; 623 skb_get_timestampns(skb, &ts[0]);
624 skb_get_timestampns(skb, &ts);
625 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, 624 put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
626 sizeof(ts), &ts); 625 sizeof(ts[0]), &ts[0]);
627 } 626 }
628 } 627 }
629 628
diff --git a/net/sunrpc/auth_gss/gss_spkm3_token.c b/net/sunrpc/auth_gss/gss_spkm3_token.c
index 3308157436d2..a99825d7caa0 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_token.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_token.c
@@ -223,7 +223,7 @@ spkm3_verify_mic_token(unsigned char **tokp, int *mic_hdrlen, unsigned char **ck
223 223
224 /* only support SPKM_MIC_TOK */ 224 /* only support SPKM_MIC_TOK */
225 if((ptr[6] != 0x01) || (ptr[7] != 0x01)) { 225 if((ptr[6] != 0x01) || (ptr[7] != 0x01)) {
226 dprintk("RPC: ERROR unsupported SPKM3 token \n"); 226 dprintk("RPC: ERROR unsupported SPKM3 token\n");
227 goto out; 227 goto out;
228 } 228 }
229 229
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index f0c05d3311c1..7dcfe0cc3500 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -60,7 +60,7 @@ int bc_send(struct rpc_rqst *req)
60 rpc_put_task(task); 60 rpc_put_task(task);
61 } 61 }
62 return ret; 62 return ret;
63 dprintk("RPC: bc_send ret= %d \n", ret); 63 dprintk("RPC: bc_send ret= %d\n", ret);
64} 64}
65 65
66#endif /* CONFIG_NFS_V4_1 */ 66#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index a29f259204e6..ce0d5b35c2ac 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -419,8 +419,8 @@ static void svc_udp_data_ready(struct sock *sk, int count)
419 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 419 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
420 svc_xprt_enqueue(&svsk->sk_xprt); 420 svc_xprt_enqueue(&svsk->sk_xprt);
421 } 421 }
422 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 422 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
423 wake_up_interruptible(sk->sk_sleep); 423 wake_up_interruptible(sk_sleep(sk));
424} 424}
425 425
426/* 426/*
@@ -436,10 +436,10 @@ static void svc_write_space(struct sock *sk)
436 svc_xprt_enqueue(&svsk->sk_xprt); 436 svc_xprt_enqueue(&svsk->sk_xprt);
437 } 437 }
438 438
439 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) { 439 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk))) {
440 dprintk("RPC svc_write_space: someone sleeping on %p\n", 440 dprintk("RPC svc_write_space: someone sleeping on %p\n",
441 svsk); 441 svsk);
442 wake_up_interruptible(sk->sk_sleep); 442 wake_up_interruptible(sk_sleep(sk));
443 } 443 }
444} 444}
445 445
@@ -757,8 +757,8 @@ static void svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
757 printk("svc: socket %p: no user data\n", sk); 757 printk("svc: socket %p: no user data\n", sk);
758 } 758 }
759 759
760 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 760 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
761 wake_up_interruptible_all(sk->sk_sleep); 761 wake_up_interruptible_all(sk_sleep(sk));
762} 762}
763 763
764/* 764/*
@@ -777,8 +777,8 @@ static void svc_tcp_state_change(struct sock *sk)
777 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags); 777 set_bit(XPT_CLOSE, &svsk->sk_xprt.xpt_flags);
778 svc_xprt_enqueue(&svsk->sk_xprt); 778 svc_xprt_enqueue(&svsk->sk_xprt);
779 } 779 }
780 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 780 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
781 wake_up_interruptible_all(sk->sk_sleep); 781 wake_up_interruptible_all(sk_sleep(sk));
782} 782}
783 783
784static void svc_tcp_data_ready(struct sock *sk, int count) 784static void svc_tcp_data_ready(struct sock *sk, int count)
@@ -791,8 +791,8 @@ static void svc_tcp_data_ready(struct sock *sk, int count)
791 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags); 791 set_bit(XPT_DATA, &svsk->sk_xprt.xpt_flags);
792 svc_xprt_enqueue(&svsk->sk_xprt); 792 svc_xprt_enqueue(&svsk->sk_xprt);
793 } 793 }
794 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 794 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
795 wake_up_interruptible(sk->sk_sleep); 795 wake_up_interruptible(sk_sleep(sk));
796} 796}
797 797
798/* 798/*
@@ -1494,8 +1494,8 @@ static void svc_sock_detach(struct svc_xprt *xprt)
1494 sk->sk_data_ready = svsk->sk_odata; 1494 sk->sk_data_ready = svsk->sk_odata;
1495 sk->sk_write_space = svsk->sk_owspace; 1495 sk->sk_write_space = svsk->sk_owspace;
1496 1496
1497 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 1497 if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
1498 wake_up_interruptible(sk->sk_sleep); 1498 wake_up_interruptible(sk_sleep(sk));
1499} 1499}
1500 1500
1501/* 1501/*
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 42f09ade0044..699ade68aac1 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -974,7 +974,7 @@ void xprt_reserve(struct rpc_task *task)
974 974
975static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt) 975static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
976{ 976{
977 return xprt->xid++; 977 return (__force __be32)xprt->xid++;
978} 978}
979 979
980static inline void xprt_init_xid(struct rpc_xprt *xprt) 980static inline void xprt_init_xid(struct rpc_xprt *xprt)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a3bfd4064912..90a051912c03 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -558,10 +558,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
558 struct tipc_bearer *unused1, 558 struct tipc_bearer *unused1,
559 struct tipc_media_addr *unused2) 559 struct tipc_media_addr *unused2)
560{ 560{
561 static int send_count = 0;
562
563 int bp_index; 561 int bp_index;
564 int swap_time;
565 562
566 /* Prepare buffer for broadcasting (if first time trying to send it) */ 563 /* Prepare buffer for broadcasting (if first time trying to send it) */
567 564
@@ -575,11 +572,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
575 msg_set_mc_netid(msg, tipc_net_id); 572 msg_set_mc_netid(msg, tipc_net_id);
576 } 573 }
577 574
578 /* Determine if bearer pairs should be swapped following this attempt */
579
580 if ((swap_time = (++send_count >= 10)))
581 send_count = 0;
582
583 /* Send buffer over bearers until all targets reached */ 575 /* Send buffer over bearers until all targets reached */
584 576
585 bcbearer->remains = tipc_cltr_bcast_nodes; 577 bcbearer->remains = tipc_cltr_bcast_nodes;
@@ -595,21 +587,22 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
595 if (bcbearer->remains_new.count == bcbearer->remains.count) 587 if (bcbearer->remains_new.count == bcbearer->remains.count)
596 continue; /* bearer pair doesn't add anything */ 588 continue; /* bearer pair doesn't add anything */
597 589
598 if (!p->publ.blocked && 590 if (p->publ.blocked ||
599 !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) { 591 p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
600 if (swap_time && s && !s->publ.blocked) 592 /* unable to send on primary bearer */
601 goto swap; 593 if (!s || s->publ.blocked ||
602 else 594 s->media->send_msg(buf, &s->publ,
603 goto update; 595 &s->media->bcast_addr)) {
596 /* unable to send on either bearer */
597 continue;
598 }
599 }
600
601 if (s) {
602 bcbearer->bpairs[bp_index].primary = s;
603 bcbearer->bpairs[bp_index].secondary = p;
604 } 604 }
605 605
606 if (!s || s->publ.blocked ||
607 s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
608 continue; /* unable to send using bearer pair */
609swap:
610 bcbearer->bpairs[bp_index].primary = s;
611 bcbearer->bpairs[bp_index].secondary = p;
612update:
613 if (bcbearer->remains_new.count == 0) 606 if (bcbearer->remains_new.count == 0)
614 return 0; 607 return 0;
615 608
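
Note: the tipc/bcast.c rewrite drops the static send_count heuristic that swapped bearer pairs every tenth send. The new flow tries the primary bearer, falls back to the secondary on failure, and after any successful send swaps the two so consecutive broadcasts alternate between them. A simplified model of that flow (struct bearer_pair and bearer_send() are illustrative, not TIPC symbols):

#include <linux/kernel.h>	/* swap() */
#include <linux/skbuff.h>

struct bearer;					/* opaque stand-in */
int bearer_send(struct bearer *b, struct sk_buff *buf);  /* 0 on success */

struct bearer_pair {
	struct bearer *primary;
	struct bearer *secondary;
};

static int send_via_pair(struct bearer_pair *bp, struct sk_buff *buf)
{
	if (bearer_send(bp->primary, buf) != 0) {
		/* primary failed: try the secondary instead */
		if (!bp->secondary || bearer_send(bp->secondary, buf) != 0)
			return -EAGAIN;		/* neither bearer took it */
	}
	if (bp->secondary)
		swap(bp->primary, bp->secondary); /* alternate next time */
	return 0;
}
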
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 52c571fedbe0..4e84c8431f32 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -49,7 +49,7 @@
49#include "config.h" 49#include "config.h"
50 50
51 51
52#define TIPC_MOD_VER "1.6.4" 52#define TIPC_MOD_VER "2.0.0"
53 53
54#ifndef CONFIG_TIPC_ZONES 54#ifndef CONFIG_TIPC_ZONES
55#define CONFIG_TIPC_ZONES 3 55#define CONFIG_TIPC_ZONES 3
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 1a7e4665af80..c76e82e5f982 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -877,7 +877,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
877 case TIMEOUT_EVT: 877 case TIMEOUT_EVT:
878 dbg_link("TIM "); 878 dbg_link("TIM ");
879 if (l_ptr->next_in_no != l_ptr->checkpoint) { 879 if (l_ptr->next_in_no != l_ptr->checkpoint) {
880 dbg_link("-> WW \n"); 880 dbg_link("-> WW\n");
881 l_ptr->state = WORKING_WORKING; 881 l_ptr->state = WORKING_WORKING;
882 l_ptr->fsm_msg_cnt = 0; 882 l_ptr->fsm_msg_cnt = 0;
883 l_ptr->checkpoint = l_ptr->next_in_no; 883 l_ptr->checkpoint = l_ptr->next_in_no;
@@ -934,7 +934,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
934 link_set_timer(l_ptr, cont_intv); 934 link_set_timer(l_ptr, cont_intv);
935 break; 935 break;
936 case RESET_MSG: 936 case RESET_MSG:
937 dbg_link("RES \n"); 937 dbg_link("RES\n");
938 dbg_link(" -> RR\n"); 938 dbg_link(" -> RR\n");
939 l_ptr->state = RESET_RESET; 939 l_ptr->state = RESET_RESET;
940 l_ptr->fsm_msg_cnt = 0; 940 l_ptr->fsm_msg_cnt = 0;
@@ -947,7 +947,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
947 l_ptr->started = 1; 947 l_ptr->started = 1;
948 /* fall through */ 948 /* fall through */
949 case TIMEOUT_EVT: 949 case TIMEOUT_EVT:
950 dbg_link("TIM \n"); 950 dbg_link("TIM\n");
951 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); 951 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
952 l_ptr->fsm_msg_cnt++; 952 l_ptr->fsm_msg_cnt++;
953 link_set_timer(l_ptr, cont_intv); 953 link_set_timer(l_ptr, cont_intv);
@@ -1553,7 +1553,7 @@ u32 tipc_link_push_packet(struct link *l_ptr)
1553 1553
1554 /* Continue retransmission now, if there is anything: */ 1554 /* Continue retransmission now, if there is anything: */
1555 1555
1556 if (r_q_size && buf && !skb_cloned(buf)) { 1556 if (r_q_size && buf) {
1557 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1557 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1558 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1558 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1559 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1559 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
@@ -1722,15 +1722,16 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1722 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); 1722 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1723 1723
1724 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) { 1724 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
1725 if (!skb_cloned(buf)) { 1725 if (l_ptr->retransm_queue_size == 0) {
1726 msg_dbg(msg, ">NO_RETR->BCONG>"); 1726 msg_dbg(msg, ">NO_RETR->BCONG>");
1727 dbg_print_link(l_ptr, " "); 1727 dbg_print_link(l_ptr, " ");
1728 l_ptr->retransm_queue_head = msg_seqno(msg); 1728 l_ptr->retransm_queue_head = msg_seqno(msg);
1729 l_ptr->retransm_queue_size = retransmits; 1729 l_ptr->retransm_queue_size = retransmits;
1730 return;
1731 } else { 1730 } else {
1732 /* Don't retransmit if driver already has the buffer */ 1731 err("Unexpected retransmit on link %s (qsize=%d)\n",
1732 l_ptr->name, l_ptr->retransm_queue_size);
1733 } 1733 }
1734 return;
1734 } else { 1735 } else {
1735 /* Detect repeated retransmit failures on uncongested bearer */ 1736 /* Detect repeated retransmit failures on uncongested bearer */
1736 1737
@@ -1745,7 +1746,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1745 } 1746 }
1746 } 1747 }
1747 1748
1748 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) { 1749 while (retransmits && (buf != l_ptr->next_out) && buf) {
1749 msg = buf_msg(buf); 1750 msg = buf_msg(buf);
1750 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1751 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1751 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1752 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
@@ -3294,7 +3295,7 @@ static void link_dump_rec_queue(struct link *l_ptr)
3294 info("buffer %x invalid\n", crs); 3295 info("buffer %x invalid\n", crs);
3295 return; 3296 return;
3296 } 3297 }
3297 msg_dbg(buf_msg(crs), "In rec queue: \n"); 3298 msg_dbg(buf_msg(crs), "In rec queue:\n");
3298 crs = crs->next; 3299 crs = crs->next;
3299 } 3300 }
3300} 3301}
diff --git a/net/tipc/net.c b/net/tipc/net.c
index f25b1cdb64eb..d7cd1e064a80 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -116,7 +116,7 @@
116*/ 116*/
117 117
118DEFINE_RWLOCK(tipc_net_lock); 118DEFINE_RWLOCK(tipc_net_lock);
119struct _zone *tipc_zones[256] = { NULL, }; 119static struct _zone *tipc_zones[256] = { NULL, };
120struct network tipc_net = { tipc_zones }; 120struct network tipc_net = { tipc_zones };
121 121
122struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref) 122struct tipc_node *tipc_net_select_remote_node(u32 addr, u32 ref)
@@ -291,6 +291,6 @@ void tipc_net_stop(void)
291 tipc_bclink_stop(); 291 tipc_bclink_stop();
292 net_stop(); 292 net_stop();
293 write_unlock_bh(&tipc_net_lock); 293 write_unlock_bh(&tipc_net_lock);
294 info("Left network mode \n"); 294 info("Left network mode\n");
295} 295}
296 296
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2c24e7d6d950..17cc394f424f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -278,7 +278,7 @@ struct tipc_node *tipc_node_attach_link(struct link *l_ptr)
278 n_ptr->link_cnt++; 278 n_ptr->link_cnt++;
279 return n_ptr; 279 return n_ptr;
280 } 280 }
281 err("Attempt to establish second link on <%s> to %s \n", 281 err("Attempt to establish second link on <%s> to %s\n",
282 l_ptr->b_ptr->publ.name, 282 l_ptr->b_ptr->publ.name,
283 addr_string_fill(addr_string, l_ptr->addr)); 283 addr_string_fill(addr_string, l_ptr->addr));
284 } 284 }
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index cfb20b80b3a1..66e889ba48fd 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -446,7 +446,7 @@ static unsigned int poll(struct file *file, struct socket *sock,
446 struct sock *sk = sock->sk; 446 struct sock *sk = sock->sk;
447 u32 mask; 447 u32 mask;
448 448
449 poll_wait(file, sk->sk_sleep, wait); 449 poll_wait(file, sk_sleep(sk), wait);
450 450
451 if (!skb_queue_empty(&sk->sk_receive_queue) || 451 if (!skb_queue_empty(&sk->sk_receive_queue) ||
452 (sock->state == SS_UNCONNECTED) || 452 (sock->state == SS_UNCONNECTED) ||
@@ -591,7 +591,7 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
591 break; 591 break;
592 } 592 }
593 release_sock(sk); 593 release_sock(sk);
594 res = wait_event_interruptible(*sk->sk_sleep, 594 res = wait_event_interruptible(*sk_sleep(sk),
595 !tport->congested); 595 !tport->congested);
596 lock_sock(sk); 596 lock_sock(sk);
597 if (res) 597 if (res)
@@ -650,7 +650,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
650 break; 650 break;
651 } 651 }
652 release_sock(sk); 652 release_sock(sk);
653 res = wait_event_interruptible(*sk->sk_sleep, 653 res = wait_event_interruptible(*sk_sleep(sk),
654 (!tport->congested || !tport->connected)); 654 (!tport->congested || !tport->connected));
655 lock_sock(sk); 655 lock_sock(sk);
656 if (res) 656 if (res)
@@ -931,7 +931,7 @@ restart:
931 goto exit; 931 goto exit;
932 } 932 }
933 release_sock(sk); 933 release_sock(sk);
934 res = wait_event_interruptible(*sk->sk_sleep, 934 res = wait_event_interruptible(*sk_sleep(sk),
935 (!skb_queue_empty(&sk->sk_receive_queue) || 935 (!skb_queue_empty(&sk->sk_receive_queue) ||
936 (sock->state == SS_DISCONNECTING))); 936 (sock->state == SS_DISCONNECTING)));
937 lock_sock(sk); 937 lock_sock(sk);
@@ -1064,7 +1064,7 @@ restart:
1064 goto exit; 1064 goto exit;
1065 } 1065 }
1066 release_sock(sk); 1066 release_sock(sk);
1067 res = wait_event_interruptible(*sk->sk_sleep, 1067 res = wait_event_interruptible(*sk_sleep(sk),
1068 (!skb_queue_empty(&sk->sk_receive_queue) || 1068 (!skb_queue_empty(&sk->sk_receive_queue) ||
1069 (sock->state == SS_DISCONNECTING))); 1069 (sock->state == SS_DISCONNECTING)));
1070 lock_sock(sk); 1070 lock_sock(sk);
@@ -1271,8 +1271,8 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1271 tipc_disconnect_port(tipc_sk_port(sk)); 1271 tipc_disconnect_port(tipc_sk_port(sk));
1272 } 1272 }
1273 1273
1274 if (waitqueue_active(sk->sk_sleep)) 1274 if (waitqueue_active(sk_sleep(sk)))
1275 wake_up_interruptible(sk->sk_sleep); 1275 wake_up_interruptible(sk_sleep(sk));
1276 return TIPC_OK; 1276 return TIPC_OK;
1277} 1277}
1278 1278
@@ -1343,8 +1343,8 @@ static void wakeupdispatch(struct tipc_port *tport)
1343{ 1343{
1344 struct sock *sk = (struct sock *)tport->usr_handle; 1344 struct sock *sk = (struct sock *)tport->usr_handle;
1345 1345
1346 if (waitqueue_active(sk->sk_sleep)) 1346 if (waitqueue_active(sk_sleep(sk)))
1347 wake_up_interruptible(sk->sk_sleep); 1347 wake_up_interruptible(sk_sleep(sk));
1348} 1348}
1349 1349
1350/** 1350/**
@@ -1426,7 +1426,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1426 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */ 1426 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
1427 1427
1428 release_sock(sk); 1428 release_sock(sk);
1429 res = wait_event_interruptible_timeout(*sk->sk_sleep, 1429 res = wait_event_interruptible_timeout(*sk_sleep(sk),
1430 (!skb_queue_empty(&sk->sk_receive_queue) || 1430 (!skb_queue_empty(&sk->sk_receive_queue) ||
1431 (sock->state != SS_CONNECTING)), 1431 (sock->state != SS_CONNECTING)),
1432 sk->sk_rcvtimeo); 1432 sk->sk_rcvtimeo);
@@ -1521,7 +1521,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
1521 goto exit; 1521 goto exit;
1522 } 1522 }
1523 release_sock(sk); 1523 release_sock(sk);
1524 res = wait_event_interruptible(*sk->sk_sleep, 1524 res = wait_event_interruptible(*sk_sleep(sk),
1525 (!skb_queue_empty(&sk->sk_receive_queue))); 1525 (!skb_queue_empty(&sk->sk_receive_queue)));
1526 lock_sock(sk); 1526 lock_sock(sk);
1527 if (res) 1527 if (res)
@@ -1632,8 +1632,8 @@ restart:
1632 /* Discard any unreceived messages; wake up sleeping tasks */ 1632 /* Discard any unreceived messages; wake up sleeping tasks */
1633 1633
1634 discard_rx_queue(sk); 1634 discard_rx_queue(sk);
1635 if (waitqueue_active(sk->sk_sleep)) 1635 if (waitqueue_active(sk_sleep(sk)))
1636 wake_up_interruptible(sk->sk_sleep); 1636 wake_up_interruptible(sk_sleep(sk));
1637 res = 0; 1637 res = 0;
1638 break; 1638 break;
1639 1639
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index ff123e56114a..ab6eab4c45e2 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -274,7 +274,7 @@ static void subscr_cancel(struct tipc_subscr *s,
274{ 274{
275 struct subscription *sub; 275 struct subscription *sub;
276 struct subscription *sub_temp; 276 struct subscription *sub_temp;
277 __u32 type, lower, upper; 277 __u32 type, lower, upper, timeout, filter;
278 int found = 0; 278 int found = 0;
279 279
280 /* Find first matching subscription, exit if not found */ 280 /* Find first matching subscription, exit if not found */
@@ -282,12 +282,18 @@ static void subscr_cancel(struct tipc_subscr *s,
282 type = ntohl(s->seq.type); 282 type = ntohl(s->seq.type);
283 lower = ntohl(s->seq.lower); 283 lower = ntohl(s->seq.lower);
284 upper = ntohl(s->seq.upper); 284 upper = ntohl(s->seq.upper);
285 timeout = ntohl(s->timeout);
286 filter = ntohl(s->filter) & ~TIPC_SUB_CANCEL;
285 287
286 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, 288 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
287 subscription_list) { 289 subscription_list) {
288 if ((type == sub->seq.type) && 290 if ((type == sub->seq.type) &&
289 (lower == sub->seq.lower) && 291 (lower == sub->seq.lower) &&
290 (upper == sub->seq.upper)) { 292 (upper == sub->seq.upper) &&
293 (timeout == sub->timeout) &&
294 (filter == sub->filter) &&
295 !memcmp(s->usr_handle,sub->evt.s.usr_handle,
296 sizeof(s->usr_handle)) ){
291 found = 1; 297 found = 1;
292 break; 298 break;
293 } 299 }
@@ -304,7 +310,7 @@ static void subscr_cancel(struct tipc_subscr *s,
304 k_term_timer(&sub->timer); 310 k_term_timer(&sub->timer);
305 spin_lock_bh(subscriber->lock); 311 spin_lock_bh(subscriber->lock);
306 } 312 }
307 dbg("Cancel: removing sub %u,%u,%u from subscriber %x list\n", 313 dbg("Cancel: removing sub %u,%u,%u from subscriber %p list\n",
308 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); 314 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
309 subscr_del(sub); 315 subscr_del(sub);
310} 316}
@@ -352,8 +358,7 @@ static struct subscription *subscr_subscribe(struct tipc_subscr *s,
352 sub->seq.upper = ntohl(s->seq.upper); 358 sub->seq.upper = ntohl(s->seq.upper);
353 sub->timeout = ntohl(s->timeout); 359 sub->timeout = ntohl(s->timeout);
354 sub->filter = ntohl(s->filter); 360 sub->filter = ntohl(s->filter);
355 if ((!(sub->filter & TIPC_SUB_PORTS) == 361 if ((sub->filter && (sub->filter != TIPC_SUB_PORTS)) ||
356 !(sub->filter & TIPC_SUB_SERVICE)) ||
357 (sub->seq.lower > sub->seq.upper)) { 362 (sub->seq.lower > sub->seq.upper)) {
358 warn("Subscription rejected, illegal request\n"); 363 warn("Subscription rejected, illegal request\n");
359 kfree(sub); 364 kfree(sub);
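
Note: subscr_cancel() previously matched on the name sequence alone, so any subscription over the same type/lower/upper range could be cancelled by another. It now also compares timeout, filter (with TIPC_SUB_CANCEL masked off the request), and the subscriber's usr_handle. Roughly (a sketch against TIPC's internal struct subscription):

#include <linux/string.h>

/* filter must arrive with TIPC_SUB_CANCEL already cleared, as in
 * the hunk above */
static int cancel_matches(const struct subscription *sub,
			  u32 type, u32 lower, u32 upper,
			  u32 timeout, u32 filter, const char *handle)
{
	return type == sub->seq.type &&
	       lower == sub->seq.lower &&
	       upper == sub->seq.upper &&
	       timeout == sub->timeout &&
	       filter == sub->filter &&
	       !memcmp(handle, sub->evt.s.usr_handle,
		       sizeof(sub->evt.s.usr_handle));
}
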
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3d9122e78f41..87c0360eaa25 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -316,7 +316,7 @@ static void unix_write_space(struct sock *sk)
316 read_lock(&sk->sk_callback_lock); 316 read_lock(&sk->sk_callback_lock);
317 if (unix_writable(sk)) { 317 if (unix_writable(sk)) {
318 if (sk_has_sleeper(sk)) 318 if (sk_has_sleeper(sk))
319 wake_up_interruptible_sync(sk->sk_sleep); 319 wake_up_interruptible_sync(sk_sleep(sk));
320 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); 320 sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
321 } 321 }
322 read_unlock(&sk->sk_callback_lock); 322 read_unlock(&sk->sk_callback_lock);
@@ -1736,7 +1736,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
1736 unix_state_lock(sk); 1736 unix_state_lock(sk);
1737 1737
1738 for (;;) { 1738 for (;;) {
1739 prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); 1739 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1740 1740
1741 if (!skb_queue_empty(&sk->sk_receive_queue) || 1741 if (!skb_queue_empty(&sk->sk_receive_queue) ||
1742 sk->sk_err || 1742 sk->sk_err ||
@@ -1752,7 +1752,7 @@ static long unix_stream_data_wait(struct sock *sk, long timeo)
1752 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1752 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1753 } 1753 }
1754 1754
1755 finish_wait(sk->sk_sleep, &wait); 1755 finish_wait(sk_sleep(sk), &wait);
1756 unix_state_unlock(sk); 1756 unix_state_unlock(sk);
1757 return timeo; 1757 return timeo;
1758} 1758}
@@ -1991,7 +1991,7 @@ static unsigned int unix_poll(struct file *file, struct socket *sock, poll_table
1991 struct sock *sk = sock->sk; 1991 struct sock *sk = sock->sk;
1992 unsigned int mask; 1992 unsigned int mask;
1993 1993
1994 sock_poll_wait(file, sk->sk_sleep, wait); 1994 sock_poll_wait(file, sk_sleep(sk), wait);
1995 mask = 0; 1995 mask = 0;
1996 1996
1997 /* exceptional events? */ 1997 /* exceptional events? */
@@ -2028,7 +2028,7 @@ static unsigned int unix_dgram_poll(struct file *file, struct socket *sock,
2028 struct sock *sk = sock->sk, *other; 2028 struct sock *sk = sock->sk, *other;
2029 unsigned int mask, writable; 2029 unsigned int mask, writable;
2030 2030
2031 sock_poll_wait(file, sk->sk_sleep, wait); 2031 sock_poll_wait(file, sk_sleep(sk), wait);
2032 mask = 0; 2032 mask = 0;
2033 2033
2034 /* exceptional events? */ 2034 /* exceptional events? */
diff --git a/net/wimax/op-reset.c b/net/wimax/op-reset.c
index 4dc82a54ba30..68bedf3e5443 100644
--- a/net/wimax/op-reset.c
+++ b/net/wimax/op-reset.c
@@ -110,7 +110,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
110{ 110{
111 int result, ifindex; 111 int result, ifindex;
112 struct wimax_dev *wimax_dev; 112 struct wimax_dev *wimax_dev;
113 struct device *dev;
114 113
115 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); 114 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
116 result = -ENODEV; 115 result = -ENODEV;
@@ -123,7 +122,6 @@ int wimax_gnl_doit_reset(struct sk_buff *skb, struct genl_info *info)
123 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); 122 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
124 if (wimax_dev == NULL) 123 if (wimax_dev == NULL)
125 goto error_no_wimax_dev; 124 goto error_no_wimax_dev;
126 dev = wimax_dev_to_dev(wimax_dev);
127 /* Execute the operation and send the result back to user space */ 125 /* Execute the operation and send the result back to user space */
128 result = wimax_reset(wimax_dev); 126 result = wimax_reset(wimax_dev);
129 dev_put(wimax_dev->net_dev); 127 dev_put(wimax_dev->net_dev);
diff --git a/net/wimax/op-state-get.c b/net/wimax/op-state-get.c
index 11ad3356eb56..aff8776e2d41 100644
--- a/net/wimax/op-state-get.c
+++ b/net/wimax/op-state-get.c
@@ -53,7 +53,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
53{ 53{
54 int result, ifindex; 54 int result, ifindex;
55 struct wimax_dev *wimax_dev; 55 struct wimax_dev *wimax_dev;
56 struct device *dev;
57 56
58 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info); 57 d_fnstart(3, NULL, "(skb %p info %p)\n", skb, info);
59 result = -ENODEV; 58 result = -ENODEV;
@@ -66,7 +65,6 @@ int wimax_gnl_doit_state_get(struct sk_buff *skb, struct genl_info *info)
66 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex); 65 wimax_dev = wimax_dev_get_by_genl_info(info, ifindex);
67 if (wimax_dev == NULL) 66 if (wimax_dev == NULL)
68 goto error_no_wimax_dev; 67 goto error_no_wimax_dev;
69 dev = wimax_dev_to_dev(wimax_dev);
70 /* Execute the operation and send the result back to user space */ 68 /* Execute the operation and send the result back to user space */
71 result = wimax_state_get(wimax_dev); 69 result = wimax_state_get(wimax_dev);
72 dev_put(wimax_dev->net_dev); 70 dev_put(wimax_dev->net_dev);
diff --git a/net/wireless/core.h b/net/wireless/core.h
index d52da913145a..b2234b436ead 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -293,13 +293,15 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
293 const u8 *bssid, 293 const u8 *bssid,
294 const u8 *ssid, int ssid_len, 294 const u8 *ssid, int ssid_len,
295 const u8 *ie, int ie_len, 295 const u8 *ie, int ie_len,
296 const u8 *key, int key_len, int key_idx); 296 const u8 *key, int key_len, int key_idx,
297 bool local_state_change);
297int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev, 298int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
298 struct net_device *dev, struct ieee80211_channel *chan, 299 struct net_device *dev, struct ieee80211_channel *chan,
299 enum nl80211_auth_type auth_type, const u8 *bssid, 300 enum nl80211_auth_type auth_type, const u8 *bssid,
300 const u8 *ssid, int ssid_len, 301 const u8 *ssid, int ssid_len,
301 const u8 *ie, int ie_len, 302 const u8 *ie, int ie_len,
302 const u8 *key, int key_len, int key_idx); 303 const u8 *key, int key_len, int key_idx,
304 bool local_state_change);
303int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev, 305int __cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
304 struct net_device *dev, 306 struct net_device *dev,
305 struct ieee80211_channel *chan, 307 struct ieee80211_channel *chan,
@@ -315,13 +317,16 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
315 struct cfg80211_crypto_settings *crypt); 317 struct cfg80211_crypto_settings *crypt);
316int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 318int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
317 struct net_device *dev, const u8 *bssid, 319 struct net_device *dev, const u8 *bssid,
318 const u8 *ie, int ie_len, u16 reason); 320 const u8 *ie, int ie_len, u16 reason,
321 bool local_state_change);
319int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev, 322int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
320 struct net_device *dev, const u8 *bssid, 323 struct net_device *dev, const u8 *bssid,
321 const u8 *ie, int ie_len, u16 reason); 324 const u8 *ie, int ie_len, u16 reason,
325 bool local_state_change);
322int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev, 326int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
323 struct net_device *dev, const u8 *bssid, 327 struct net_device *dev, const u8 *bssid,
324 const u8 *ie, int ie_len, u16 reason); 328 const u8 *ie, int ie_len, u16 reason,
329 bool local_state_change);
325void cfg80211_mlme_down(struct cfg80211_registered_device *rdev, 330void cfg80211_mlme_down(struct cfg80211_registered_device *rdev,
326 struct net_device *dev); 331 struct net_device *dev);
327void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid, 332void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
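
Note: core.h adds a bool local_state_change to every MLME auth/deauth/disassoc entry point; the mlme.c hunks below carry it into the cfg80211_*_request structures so drivers can update their state machine without emitting a frame (useful when the peer is already unreachable). A hedged example of a caller after the change (drop_connection is a made-up wrapper):

#include <linux/ieee80211.h>

static int drop_connection(struct cfg80211_registered_device *rdev,
			   struct net_device *dev, const u8 *bssid,
			   bool ap_vanished)
{
	/* true: only resynchronize local state, put nothing on the
	 * air; false: ordinary deauth with a frame to the AP */
	return cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
				    WLAN_REASON_DEAUTH_LEAVING,
				    ap_vanished);
}
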
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 22139fa46115..48ead6f0426d 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -378,7 +378,8 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
 			 const u8 *bssid,
 			 const u8 *ssid, int ssid_len,
 			 const u8 *ie, int ie_len,
-			 const u8 *key, int key_len, int key_idx)
+			 const u8 *key, int key_len, int key_idx,
+			 bool local_state_change)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_auth_request req;
@@ -408,6 +409,7 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
 
 	memset(&req, 0, sizeof(req));
 
+	req.local_state_change = local_state_change;
 	req.ie = ie;
 	req.ie_len = ie_len;
 	req.auth_type = auth_type;
@@ -434,12 +436,18 @@ int __cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
 		goto out;
 	}
 
-	wdev->authtry_bsses[slot] = bss;
+	if (local_state_change)
+		wdev->auth_bsses[slot] = bss;
+	else
+		wdev->authtry_bsses[slot] = bss;
 	cfg80211_hold_bss(bss);
 
 	err = rdev->ops->auth(&rdev->wiphy, dev, &req);
 	if (err) {
-		wdev->authtry_bsses[slot] = NULL;
+		if (local_state_change)
+			wdev->auth_bsses[slot] = NULL;
+		else
+			wdev->authtry_bsses[slot] = NULL;
 		cfg80211_unhold_bss(bss);
 	}
 
@@ -454,14 +462,15 @@ int cfg80211_mlme_auth(struct cfg80211_registered_device *rdev,
 		       enum nl80211_auth_type auth_type, const u8 *bssid,
 		       const u8 *ssid, int ssid_len,
 		       const u8 *ie, int ie_len,
-		       const u8 *key, int key_len, int key_idx)
+		       const u8 *key, int key_len, int key_idx,
+		       bool local_state_change)
 {
 	int err;
 
 	wdev_lock(dev->ieee80211_ptr);
 	err = __cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
 				   ssid, ssid_len, ie, ie_len,
-				   key, key_len, key_idx);
+				   key, key_len, key_idx, local_state_change);
 	wdev_unlock(dev->ieee80211_ptr);
 
 	return err;
@@ -555,7 +564,8 @@ int cfg80211_mlme_assoc(struct cfg80211_registered_device *rdev,
 
 int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
 			   struct net_device *dev, const u8 *bssid,
-			   const u8 *ie, int ie_len, u16 reason)
+			   const u8 *ie, int ie_len, u16 reason,
+			   bool local_state_change)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_deauth_request req;
@@ -565,6 +575,7 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
 
 	memset(&req, 0, sizeof(req));
 	req.reason_code = reason;
+	req.local_state_change = local_state_change;
 	req.ie = ie;
 	req.ie_len = ie_len;
 	if (wdev->current_bss &&
@@ -591,13 +602,15 @@ int __cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
 
 int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
 			 struct net_device *dev, const u8 *bssid,
-			 const u8 *ie, int ie_len, u16 reason)
+			 const u8 *ie, int ie_len, u16 reason,
+			 bool local_state_change)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
 	wdev_lock(wdev);
-	err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason);
+	err = __cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason,
+				     local_state_change);
 	wdev_unlock(wdev);
 
 	return err;
@@ -605,7 +618,8 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
 
 static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
 				    struct net_device *dev, const u8 *bssid,
-				    const u8 *ie, int ie_len, u16 reason)
+				    const u8 *ie, int ie_len, u16 reason,
+				    bool local_state_change)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	struct cfg80211_disassoc_request req;
@@ -620,6 +634,7 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
 
 	memset(&req, 0, sizeof(req));
 	req.reason_code = reason;
+	req.local_state_change = local_state_change;
 	req.ie = ie;
 	req.ie_len = ie_len;
 	if (memcmp(wdev->current_bss->pub.bssid, bssid, ETH_ALEN) == 0)
@@ -632,13 +647,15 @@ static int __cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
 
 int cfg80211_mlme_disassoc(struct cfg80211_registered_device *rdev,
 			   struct net_device *dev, const u8 *bssid,
-			   const u8 *ie, int ie_len, u16 reason)
+			   const u8 *ie, int ie_len, u16 reason,
+			   bool local_state_change)
 {
 	struct wireless_dev *wdev = dev->ieee80211_ptr;
 	int err;
 
 	wdev_lock(wdev);
-	err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason);
+	err = __cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason,
+				       local_state_change);
 	wdev_unlock(wdev);
 
 	return err;
@@ -895,3 +912,16 @@ void cfg80211_action_tx_status(struct net_device *dev, u64 cookie,
 	nl80211_send_action_tx_status(rdev, dev, cookie, buf, len, ack, gfp);
 }
 EXPORT_SYMBOL(cfg80211_action_tx_status);
+
+void cfg80211_cqm_rssi_notify(struct net_device *dev,
+			      enum nl80211_cqm_rssi_threshold_event rssi_event,
+			      gfp_t gfp)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct wiphy *wiphy = wdev->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+
+	/* Indicate roaming trigger event to user space */
+	nl80211_send_cqm_rssi_notify(rdev, dev, rssi_event, gfp);
+}
+EXPORT_SYMBOL(cfg80211_cqm_rssi_notify);
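
cfg80211_cqm_rssi_notify() is the driver-facing half of the new connection-quality-monitor (CQM) support: once userspace has configured an RSSI threshold, a driver whose firmware detects the signal crossing that threshold reports it with a single call, and cfg80211 relays the event to userspace via nl80211. A hypothetical driver event path might use it like this (sketch only; the _LOW event value is assumed from the matching enum, which is not part of this hunk):

	/* Sketch: firmware reported the signal fell below the CQM threshold. */
	cfg80211_cqm_rssi_notify(netdev,
				 NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
				 GFP_ATOMIC);
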
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 030cf153bea2..356a84a5daee 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -150,6 +150,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
 				 .len = IEEE80211_MAX_DATA_LEN },
 	[NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, },
 	[NL80211_ATTR_PS_STATE] = { .type = NLA_U32 },
+	[NL80211_ATTR_CQM] = { .type = NLA_NESTED, },
+	[NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG },
 };
 
 /* policy for the attributes */
@@ -2096,7 +2098,8 @@ static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
 		goto out_rtnl;
 
 	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN) {
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
 		err = -EINVAL;
 		goto out;
 	}
@@ -3392,6 +3395,7 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
 	int err, ssid_len, ie_len = 0;
 	enum nl80211_auth_type auth_type;
 	struct key_parse key;
+	bool local_state_change;
 
 	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
@@ -3470,9 +3474,12 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
 		goto out;
 	}
 
+	local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
+
 	err = cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
 				 ssid, ssid_len, ie, ie_len,
-				 key.p.key, key.p.key_len, key.idx);
+				 key.p.key, key.p.key_len, key.idx,
+				 local_state_change);
 
 out:
 	cfg80211_unlock_rdev(rdev);
@@ -3649,6 +3656,7 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
 	const u8 *ie = NULL, *bssid;
 	int err, ie_len = 0;
 	u16 reason_code;
+	bool local_state_change;
 
 	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
@@ -3694,7 +3702,10 @@ static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
 		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 	}
 
-	err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code);
+	local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
+
+	err = cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code,
+				   local_state_change);
 
 out:
 	cfg80211_unlock_rdev(rdev);
@@ -3711,6 +3722,7 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
 	const u8 *ie = NULL, *bssid;
 	int err, ie_len = 0;
 	u16 reason_code;
+	bool local_state_change;
 
 	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
 		return -EINVAL;
@@ -3756,7 +3768,10 @@ static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
 		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 	}
 
-	err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code);
+	local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];
+
+	err = cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code,
+				     local_state_change);
 
 out:
 	cfg80211_unlock_rdev(rdev);
@@ -4779,6 +4794,84 @@ unlock_rtnl:
 	return err;
 }
 
+static struct nla_policy
+nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
+	[NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
+	[NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
+	[NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
+};
+
+static int nl80211_set_cqm_rssi(struct genl_info *info,
+				s32 threshold, u32 hysteresis)
+{
+	struct cfg80211_registered_device *rdev;
+	struct wireless_dev *wdev;
+	struct net_device *dev;
+	int err;
+
+	if (threshold > 0)
+		return -EINVAL;
+
+	rtnl_lock();
+
+	err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev);
+	if (err)
+		goto unlock_rdev;
+
+	wdev = dev->ieee80211_ptr;
+
+	if (!rdev->ops->set_cqm_rssi_config) {
+		err = -EOPNOTSUPP;
+		goto unlock_rdev;
+	}
+
+	if (wdev->iftype != NL80211_IFTYPE_STATION) {
+		err = -EOPNOTSUPP;
+		goto unlock_rdev;
+	}
+
+	err = rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev,
+					     threshold, hysteresis);
+
+unlock_rdev:
+	cfg80211_unlock_rdev(rdev);
+	dev_put(dev);
+	rtnl_unlock();
+
+	return err;
+}
+
+static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
+{
+	struct nlattr *attrs[NL80211_ATTR_CQM_MAX + 1];
+	struct nlattr *cqm;
+	int err;
+
+	cqm = info->attrs[NL80211_ATTR_CQM];
+	if (!cqm) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm,
+			       nl80211_attr_cqm_policy);
+	if (err)
+		goto out;
+
+	if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] &&
+	    attrs[NL80211_ATTR_CQM_RSSI_HYST]) {
+		s32 threshold;
+		u32 hysteresis;
+		threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
+		hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
+		err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
+	} else
+		err = -EINVAL;
+
+out:
+	return err;
+}
+
 static struct genl_ops nl80211_ops[] = {
 	{
 		.cmd = NL80211_CMD_GET_WIPHY,
@@ -5083,6 +5176,12 @@ static struct genl_ops nl80211_ops[] = {
 		.policy = nl80211_policy,
 		/* can be retrieved by unprivileged users */
 	},
+	{
+		.cmd = NL80211_CMD_SET_CQM,
+		.doit = nl80211_set_cqm,
+		.policy = nl80211_policy,
+		.flags = GENL_ADMIN_PERM,
+	},
 };
 
 static struct genl_multicast_group nl80211_mlme_mcgrp = {
@@ -5833,6 +5932,52 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
 	nlmsg_free(msg);
 }
 
+void
+nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
+			     struct net_device *netdev,
+			     enum nl80211_cqm_rssi_threshold_event rssi_event,
+			     gfp_t gfp)
+{
+	struct sk_buff *msg;
+	struct nlattr *pinfoattr;
+	void *hdr;
+
+	msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
+	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
+
+	pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
+	if (!pinfoattr)
+		goto nla_put_failure;
+
+	NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
+		    rssi_event);
+
+	nla_nest_end(msg, pinfoattr);
+
+	if (genlmsg_end(msg, hdr) < 0) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
+				nl80211_mlme_mcgrp.id, gfp);
+	return;
+
+ nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	nlmsg_free(msg);
+}
+
 static int nl80211_netlink_notify(struct notifier_block * nb,
 				  unsigned long state,
 				  void *_notify)
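
On the userspace side, the NL80211_CMD_SET_CQM handler above expects a nested NL80211_ATTR_CQM attribute carrying both a threshold (a non-positive s32, in dBm, stored in a u32 attribute) and a hysteresis value. A minimal libnl-3 sketch, assuming an already-resolved nl80211 family id and socket, with error handling omitted (the command carries GENL_ADMIN_PERM, so CAP_NET_ADMIN is required):

	#include <netlink/genl/genl.h>
	#include <netlink/msg.h>
	#include <netlink/attr.h>

	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *cqm;

	genlmsg_put(msg, 0, 0, nl80211_id, 0, 0, NL80211_CMD_SET_CQM, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);

	cqm = nla_nest_start(msg, NL80211_ATTR_CQM);
	nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_THOLD, (uint32_t)-70); /* dBm */
	nla_put_u32(msg, NL80211_ATTR_CQM_RSSI_HYST, 2);
	nla_nest_end(msg, cqm);

	nl_send_auto_complete(sock, msg);

Note that nl80211_set_cqm_rssi() rejects thresholds above 0 and only accepts NL80211_IFTYPE_STATION interfaces whose driver implements set_cqm_rssi_config.
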
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 4ca511102c6c..2ad7fbc7d9f1 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -82,4 +82,10 @@ void nl80211_send_action_tx_status(struct cfg80211_registered_device *rdev,
 				   const u8 *buf, size_t len, bool ack,
 				   gfp_t gfp);
 
+void
+nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
+			     struct net_device *netdev,
+			     enum nl80211_cqm_rssi_threshold_event rssi_event,
+			     gfp_t gfp);
+
 #endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 422da20d1e5b..8f0d97dd3109 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2356,10 +2356,10 @@ static void print_regdomain(const struct ieee80211_regdomain *rd)
 				rdev->country_ie_alpha2[1]);
 		} else
 			printk(KERN_INFO "cfg80211: Current regulatory "
-			       "domain intersected: \n");
+			       "domain intersected:\n");
 	} else
 		printk(KERN_INFO "cfg80211: Current regulatory "
-		       "domain intersected: \n");
+		       "domain intersected:\n");
 	} else if (is_world_regdom(rd->alpha2))
 		printk(KERN_INFO "cfg80211: World regulatory "
 		       "domain updated:\n");
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index f4dfd5f5f2ea..c2735775ec19 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -171,7 +171,7 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
 					    params->ssid, params->ssid_len,
 					    NULL, 0,
 					    params->key, params->key_len,
-					    params->key_idx);
+					    params->key_idx, false);
 	case CFG80211_CONN_ASSOCIATE_NEXT:
 		BUG_ON(!rdev->ops->assoc);
 		wdev->conn->state = CFG80211_CONN_ASSOCIATING;
@@ -186,12 +186,13 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
 		if (err)
 			__cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
 					       NULL, 0,
-					       WLAN_REASON_DEAUTH_LEAVING);
+					       WLAN_REASON_DEAUTH_LEAVING,
+					       false);
 		return err;
 	case CFG80211_CONN_DEAUTH_ASSOC_FAIL:
 		__cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
 				       NULL, 0,
-				       WLAN_REASON_DEAUTH_LEAVING);
+				       WLAN_REASON_DEAUTH_LEAVING, false);
 		/* return an error so that we call __cfg80211_connect_result() */
 		return -EINVAL;
 	default:
@@ -676,7 +677,8 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
 				continue;
 			bssid = wdev->auth_bsses[i]->pub.bssid;
 			ret = __cfg80211_mlme_deauth(rdev, dev, bssid, NULL, 0,
-						WLAN_REASON_DEAUTH_LEAVING);
+						WLAN_REASON_DEAUTH_LEAVING,
+						false);
 			WARN(ret, "deauth failed: %d\n", ret);
 		}
 	}
@@ -935,7 +937,7 @@ int __cfg80211_disconnect(struct cfg80211_registered_device *rdev,
 		/* wdev->conn->params.bssid must be set if > SCANNING */
 		err = __cfg80211_mlme_deauth(rdev, dev,
 					     wdev->conn->params.bssid,
-					     NULL, 0, reason);
+					     NULL, 0, reason, false);
 		if (err)
 			return err;
 	} else {
@@ -991,7 +993,8 @@ void cfg80211_sme_disassoc(struct net_device *dev, int idx)
 
 	memcpy(bssid, wdev->auth_bsses[idx]->pub.bssid, ETH_ALEN);
 	if (__cfg80211_mlme_deauth(rdev, dev, bssid,
-				   NULL, 0, WLAN_REASON_DEAUTH_LEAVING,
+				   NULL, 0, WLAN_REASON_DEAUTH_LEAVING,
+				   false)) {
 		/* whatever -- assume gone anyway */
 		cfg80211_unhold_bss(wdev->auth_bsses[idx]);
 		cfg80211_put_bss(&wdev->auth_bsses[idx]->pub);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d3574a4eb3ba..3416373a9c0c 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -331,11 +331,18 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
 		if (iftype == NL80211_IFTYPE_MESH_POINT) {
 			struct ieee80211s_hdr *meshdr =
 				(struct ieee80211s_hdr *) (skb->data + hdrlen);
-			hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+			/* make sure meshdr->flags is on the linear part */
+			if (!pskb_may_pull(skb, hdrlen + 1))
+				return -1;
 			if (meshdr->flags & MESH_FLAGS_AE_A5_A6) {
-				memcpy(dst, meshdr->eaddr1, ETH_ALEN);
-				memcpy(src, meshdr->eaddr2, ETH_ALEN);
+				skb_copy_bits(skb, hdrlen +
+					offsetof(struct ieee80211s_hdr, eaddr1),
+					dst, ETH_ALEN);
+				skb_copy_bits(skb, hdrlen +
+					offsetof(struct ieee80211s_hdr, eaddr2),
+					src, ETH_ALEN);
 			}
+			hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
 		}
 		break;
 	case cpu_to_le16(IEEE80211_FCTL_FROMDS):
@@ -347,9 +354,14 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
 		if (iftype == NL80211_IFTYPE_MESH_POINT) {
 			struct ieee80211s_hdr *meshdr =
 				(struct ieee80211s_hdr *) (skb->data + hdrlen);
-			hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
+			/* make sure meshdr->flags is on the linear part */
+			if (!pskb_may_pull(skb, hdrlen + 1))
+				return -1;
 			if (meshdr->flags & MESH_FLAGS_AE_A4)
-				memcpy(src, meshdr->eaddr1, ETH_ALEN);
+				skb_copy_bits(skb, hdrlen +
+					offsetof(struct ieee80211s_hdr, eaddr1),
+					src, ETH_ALEN);
+			hdrlen += ieee80211_get_mesh_hdrlen(meshdr);
 		}
 		break;
 	case cpu_to_le16(0):
@@ -358,7 +370,7 @@ int ieee80211_data_to_8023(struct sk_buff *skb, const u8 *addr,
 		break;
 	}
 
-	if (unlikely(skb->len - hdrlen < 8))
+	if (!pskb_may_pull(skb, hdrlen + 8))
 		return -1;
 
 	payload = skb->data + hdrlen;
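
The point of this change: the 802.11s mesh header may extend past the skb's linear data area, so dereferencing meshdr->eaddr1/eaddr2 directly (as the old memcpy() calls did) could read past the end of the linear buffer. pskb_may_pull() guarantees the byte actually inspected is linear, and skb_copy_bits() copies from an arbitrary offset regardless of how the skb is fragmented. The pattern in isolation (sketch with assumed offset/length variables):

	/* Sketch: safely read 'len' bytes at 'offset' from a possibly
	 * non-linear skb. */
	if (!pskb_may_pull(skb, offset + 1))	/* byte we must inspect */
		return -1;
	skb_copy_bits(skb, offset, buf, len);	/* handles paged data too */
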
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 4f5a47091fde..0ef17bc42bac 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -29,226 +29,226 @@ typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *,
  * know about.
  */
 static const struct iw_ioctl_description standard_ioctl[] = {
-	[SIOCSIWCOMMIT - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWCOMMIT)] = {
 		.header_type	= IW_HEADER_TYPE_NULL,
 	},
-	[SIOCGIWNAME - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWNAME)] = {
 		.header_type	= IW_HEADER_TYPE_CHAR,
 		.flags		= IW_DESCR_FLAG_DUMP,
 	},
-	[SIOCSIWNWID - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWNWID)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 		.flags		= IW_DESCR_FLAG_EVENT,
 	},
-	[SIOCGIWNWID - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWNWID)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 		.flags		= IW_DESCR_FLAG_DUMP,
 	},
-	[SIOCSIWFREQ - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWFREQ)] = {
 		.header_type	= IW_HEADER_TYPE_FREQ,
 		.flags		= IW_DESCR_FLAG_EVENT,
 	},
-	[SIOCGIWFREQ - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWFREQ)] = {
 		.header_type	= IW_HEADER_TYPE_FREQ,
 		.flags		= IW_DESCR_FLAG_DUMP,
 	},
-	[SIOCSIWMODE - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWMODE)] = {
 		.header_type	= IW_HEADER_TYPE_UINT,
 		.flags		= IW_DESCR_FLAG_EVENT,
 	},
-	[SIOCGIWMODE - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWMODE)] = {
 		.header_type	= IW_HEADER_TYPE_UINT,
 		.flags		= IW_DESCR_FLAG_DUMP,
 	},
-	[SIOCSIWSENS - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWSENS)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCGIWSENS - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWSENS)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCSIWRANGE - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWRANGE)] = {
 		.header_type	= IW_HEADER_TYPE_NULL,
 	},
-	[SIOCGIWRANGE - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWRANGE)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= sizeof(struct iw_range),
 		.flags		= IW_DESCR_FLAG_DUMP,
 	},
-	[SIOCSIWPRIV - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWPRIV)] = {
 		.header_type	= IW_HEADER_TYPE_NULL,
 	},
-	[SIOCGIWPRIV - SIOCIWFIRST] = { /* (handled directly by us) */
+	[IW_IOCTL_IDX(SIOCGIWPRIV)] = { /* (handled directly by us) */
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= sizeof(struct iw_priv_args),
 		.max_tokens	= 16,
 		.flags		= IW_DESCR_FLAG_NOMAX,
 	},
-	[SIOCSIWSTATS - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWSTATS)] = {
 		.header_type	= IW_HEADER_TYPE_NULL,
 	},
-	[SIOCGIWSTATS - SIOCIWFIRST] = { /* (handled directly by us) */
+	[IW_IOCTL_IDX(SIOCGIWSTATS)] = { /* (handled directly by us) */
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= sizeof(struct iw_statistics),
 		.flags		= IW_DESCR_FLAG_DUMP,
 	},
-	[SIOCSIWSPY - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWSPY)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= sizeof(struct sockaddr),
 		.max_tokens	= IW_MAX_SPY,
 	},
-	[SIOCGIWSPY - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWSPY)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= sizeof(struct sockaddr) +
 				  sizeof(struct iw_quality),
 		.max_tokens	= IW_MAX_SPY,
 	},
-	[SIOCSIWTHRSPY - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWTHRSPY)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= sizeof(struct iw_thrspy),
 		.min_tokens	= 1,
 		.max_tokens	= 1,
 	},
-	[SIOCGIWTHRSPY - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWTHRSPY)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= sizeof(struct iw_thrspy),
 		.min_tokens	= 1,
 		.max_tokens	= 1,
 	},
-	[SIOCSIWAP - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWAP)] = {
 		.header_type	= IW_HEADER_TYPE_ADDR,
 	},
-	[SIOCGIWAP - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWAP)] = {
 		.header_type	= IW_HEADER_TYPE_ADDR,
 		.flags		= IW_DESCR_FLAG_DUMP,
 	},
-	[SIOCSIWMLME - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWMLME)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.min_tokens	= sizeof(struct iw_mlme),
 		.max_tokens	= sizeof(struct iw_mlme),
 	},
-	[SIOCGIWAPLIST - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWAPLIST)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= sizeof(struct sockaddr) +
 				  sizeof(struct iw_quality),
 		.max_tokens	= IW_MAX_AP,
 		.flags		= IW_DESCR_FLAG_NOMAX,
 	},
-	[SIOCSIWSCAN - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWSCAN)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.min_tokens	= 0,
 		.max_tokens	= sizeof(struct iw_scan_req),
 	},
-	[SIOCGIWSCAN - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWSCAN)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_SCAN_MAX_DATA,
 		.flags		= IW_DESCR_FLAG_NOMAX,
 	},
-	[SIOCSIWESSID - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWESSID)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_ESSID_MAX_SIZE,
 		.flags		= IW_DESCR_FLAG_EVENT,
 	},
-	[SIOCGIWESSID - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWESSID)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_ESSID_MAX_SIZE,
 		.flags		= IW_DESCR_FLAG_DUMP,
 	},
-	[SIOCSIWNICKN - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWNICKN)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_ESSID_MAX_SIZE,
 	},
-	[SIOCGIWNICKN - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWNICKN)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_ESSID_MAX_SIZE,
 	},
-	[SIOCSIWRATE - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWRATE)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCGIWRATE - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWRATE)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCSIWRTS - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWRTS)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCGIWRTS - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWRTS)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCSIWFRAG - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWFRAG)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCGIWFRAG - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWFRAG)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCSIWTXPOW - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWTXPOW)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCGIWTXPOW - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWTXPOW)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCSIWRETRY - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWRETRY)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCGIWRETRY - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWRETRY)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCSIWENCODE - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWENCODE)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_ENCODING_TOKEN_MAX,
 		.flags		= IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT,
 	},
-	[SIOCGIWENCODE - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWENCODE)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_ENCODING_TOKEN_MAX,
 		.flags		= IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT,
 	},
-	[SIOCSIWPOWER - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWPOWER)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCGIWPOWER - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWPOWER)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCSIWGENIE - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWGENIE)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_GENERIC_IE_MAX,
 	},
-	[SIOCGIWGENIE - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWGENIE)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_GENERIC_IE_MAX,
 	},
-	[SIOCSIWAUTH - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWAUTH)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCGIWAUTH - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWAUTH)] = {
 		.header_type	= IW_HEADER_TYPE_PARAM,
 	},
-	[SIOCSIWENCODEEXT - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWENCODEEXT)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.min_tokens	= sizeof(struct iw_encode_ext),
 		.max_tokens	= sizeof(struct iw_encode_ext) +
 				  IW_ENCODING_TOKEN_MAX,
 	},
-	[SIOCGIWENCODEEXT - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCGIWENCODEEXT)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.min_tokens	= sizeof(struct iw_encode_ext),
 		.max_tokens	= sizeof(struct iw_encode_ext) +
 				  IW_ENCODING_TOKEN_MAX,
 	},
-	[SIOCSIWPMKSA - SIOCIWFIRST] = {
+	[IW_IOCTL_IDX(SIOCSIWPMKSA)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.min_tokens	= sizeof(struct iw_pmksa),
@@ -262,44 +262,44 @@ static const unsigned standard_ioctl_num = ARRAY_SIZE(standard_ioctl);
  * we know about.
  */
 static const struct iw_ioctl_description standard_event[] = {
-	[IWEVTXDROP - IWEVFIRST] = {
+	[IW_EVENT_IDX(IWEVTXDROP)] = {
 		.header_type	= IW_HEADER_TYPE_ADDR,
 	},
-	[IWEVQUAL - IWEVFIRST] = {
+	[IW_EVENT_IDX(IWEVQUAL)] = {
 		.header_type	= IW_HEADER_TYPE_QUAL,
 	},
-	[IWEVCUSTOM - IWEVFIRST] = {
+	[IW_EVENT_IDX(IWEVCUSTOM)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_CUSTOM_MAX,
 	},
-	[IWEVREGISTERED - IWEVFIRST] = {
+	[IW_EVENT_IDX(IWEVREGISTERED)] = {
 		.header_type	= IW_HEADER_TYPE_ADDR,
 	},
-	[IWEVEXPIRED - IWEVFIRST] = {
+	[IW_EVENT_IDX(IWEVEXPIRED)] = {
 		.header_type	= IW_HEADER_TYPE_ADDR,
 	},
-	[IWEVGENIE - IWEVFIRST] = {
+	[IW_EVENT_IDX(IWEVGENIE)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_GENERIC_IE_MAX,
 	},
-	[IWEVMICHAELMICFAILURE - IWEVFIRST] = {
+	[IW_EVENT_IDX(IWEVMICHAELMICFAILURE)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= sizeof(struct iw_michaelmicfailure),
 	},
-	[IWEVASSOCREQIE - IWEVFIRST] = {
+	[IW_EVENT_IDX(IWEVASSOCREQIE)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_GENERIC_IE_MAX,
 	},
-	[IWEVASSOCRESPIE - IWEVFIRST] = {
+	[IW_EVENT_IDX(IWEVASSOCRESPIE)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= IW_GENERIC_IE_MAX,
 	},
-	[IWEVPMKIDCAND - IWEVFIRST] = {
+	[IW_EVENT_IDX(IWEVPMKIDCAND)] = {
 		.header_type	= IW_HEADER_TYPE_POINT,
 		.token_size	= 1,
 		.max_tokens	= sizeof(struct iw_pmkid_cand),
@@ -450,11 +450,11 @@ void wireless_send_event(struct net_device * dev,
 
 	/* Get the description of the Event */
 	if (cmd <= SIOCIWLAST) {
-		cmd_index = cmd - SIOCIWFIRST;
+		cmd_index = IW_IOCTL_IDX(cmd);
 		if (cmd_index < standard_ioctl_num)
 			descr = &(standard_ioctl[cmd_index]);
 	} else {
-		cmd_index = cmd - IWEVFIRST;
+		cmd_index = IW_EVENT_IDX(cmd);
 		if (cmd_index < standard_event_num)
 			descr = &(standard_event[cmd_index]);
 	}
@@ -663,7 +663,7 @@ static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
 		return NULL;
 
 	/* Try as a standard command */
-	index = cmd - SIOCIWFIRST;
+	index = IW_IOCTL_IDX(cmd);
 	if (index < handlers->num_standard)
 		return handlers->standard[index];
 
@@ -955,9 +955,9 @@ static int ioctl_standard_call(struct net_device * dev,
 	int ret = -EINVAL;
 
 	/* Get the description of the IOCTL */
-	if ((cmd - SIOCIWFIRST) >= standard_ioctl_num)
+	if (IW_IOCTL_IDX(cmd) >= standard_ioctl_num)
 		return -EOPNOTSUPP;
-	descr = &(standard_ioctl[cmd - SIOCIWFIRST]);
+	descr = &(standard_ioctl[IW_IOCTL_IDX(cmd)]);
 
 	/* Check if we have a pointer to user space data or not */
 	if (descr->header_type != IW_HEADER_TYPE_POINT) {
@@ -1013,7 +1013,7 @@ static int compat_standard_call(struct net_device *dev,
 	struct iw_point iwp;
 	int err;
 
-	descr = standard_ioctl + (cmd - SIOCIWFIRST);
+	descr = standard_ioctl + IW_IOCTL_IDX(cmd);
 
 	if (descr->header_type != IW_HEADER_TYPE_POINT)
 		return ioctl_standard_call(dev, iwr, cmd, info, handler);
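
IW_IOCTL_IDX() and IW_EVENT_IDX() are presumably the trivial index helpers from include/linux/wireless.h (their definitions are not part of this diff), i.e. something like:

	#define IW_IOCTL_IDX(cmd)	((cmd) - SIOCIWFIRST)
	#define IW_EVENT_IDX(cmd)	((cmd) - IWEVFIRST)

The substitution is purely mechanical and does not change behaviour; it centralizes the ioctl-number-to-array-index arithmetic so that the numbering scheme and bounds checks live in one place.
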
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index cbddd0cb83f1..6cffbc4da029 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -718,7 +718,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
 	DECLARE_WAITQUEUE(wait, current);
 	int rc;
 
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
 		rc = -ERESTARTSYS;
@@ -738,7 +738,7 @@ static int x25_wait_for_connection_establishment(struct sock *sk)
 		break;
 	}
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 	return rc;
 }
 
@@ -838,7 +838,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
 	DECLARE_WAITQUEUE(wait, current);
 	int rc = 0;
 
-	add_wait_queue_exclusive(sk->sk_sleep, &wait);
+	add_wait_queue_exclusive(sk_sleep(sk), &wait);
 	for (;;) {
 		__set_current_state(TASK_INTERRUPTIBLE);
 		if (sk->sk_shutdown & RCV_SHUTDOWN)
@@ -858,7 +858,7 @@ static int x25_wait_for_data(struct sock *sk, long timeout)
 		break;
 	}
 	__set_current_state(TASK_RUNNING);
-	remove_wait_queue(sk->sk_sleep, &wait);
+	remove_wait_queue(sk_sleep(sk), &wait);
 	return rc;
 }
 
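
Direct sk->sk_sleep accesses are being replaced tree-wide by the sk_sleep() accessor; at this point in the series the helper is, roughly (assumed from include/net/sock.h, not shown here):

	static inline wait_queue_head_t *sk_sleep(struct sock *sk)
	{
		return sk->sk_sleep;
	}

Going through an accessor lets the wait-queue storage be reorganized later without having to touch every protocol's wait loops again.
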
diff --git a/net/xfrm/xfrm_hash.h b/net/xfrm/xfrm_hash.h
index e5195c99f71e..1396572d2ade 100644
--- a/net/xfrm/xfrm_hash.h
+++ b/net/xfrm/xfrm_hash.h
@@ -16,7 +16,8 @@ static inline unsigned int __xfrm6_addr_hash(xfrm_address_t *addr)
 
 static inline unsigned int __xfrm4_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
 {
-	return ntohl(daddr->a4 + saddr->a4);
+	u32 sum = (__force u32)daddr->a4 + (__force u32)saddr->a4;
+	return ntohl((__force __be32)sum);
 }
 
 static inline unsigned int __xfrm6_daddr_saddr_hash(xfrm_address_t *daddr, xfrm_address_t *saddr)
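
The old expression summed two __be32 values and passed the result straight to ntohl(), which is numerically fine on any one machine but mixes endian-annotated and plain integer types, so sparse flags it. Doing the addition in a plain u32 with explicit __force casts, then casting back for the byte swap, produces the exact same hash value while keeping the endianness annotations honest.
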
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 843e066649cb..7430ac26ec49 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -37,6 +37,8 @@
 DEFINE_MUTEX(xfrm_cfg_mutex);
 EXPORT_SYMBOL(xfrm_cfg_mutex);
 
+static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
+static struct dst_entry *xfrm_policy_sk_bundles;
 static DEFINE_RWLOCK(xfrm_policy_lock);
 
 static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
@@ -44,12 +46,10 @@ static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
 
 static struct kmem_cache *xfrm_dst_cache __read_mostly;
 
-static HLIST_HEAD(xfrm_policy_gc_list);
-static DEFINE_SPINLOCK(xfrm_policy_gc_lock);
-
 static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
 static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
 static void xfrm_init_pmtu(struct dst_entry *dst);
+static int stale_bundle(struct dst_entry *dst);
 
 static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
 						int dir);
@@ -156,7 +156,7 @@ static void xfrm_policy_timer(unsigned long data)
 
 	read_lock(&xp->lock);
 
-	if (xp->walk.dead)
+	if (unlikely(xp->walk.dead))
 		goto out;
 
 	dir = xfrm_policy_id2dir(xp->index);
@@ -216,6 +216,35 @@ expired:
 	xfrm_pol_put(xp);
 }
 
+static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo)
+{
+	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
+
+	if (unlikely(pol->walk.dead))
+		flo = NULL;
+	else
+		xfrm_pol_hold(pol);
+
+	return flo;
+}
+
+static int xfrm_policy_flo_check(struct flow_cache_object *flo)
+{
+	struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo);
+
+	return !pol->walk.dead;
+}
+
+static void xfrm_policy_flo_delete(struct flow_cache_object *flo)
+{
+	xfrm_pol_put(container_of(flo, struct xfrm_policy, flo));
+}
+
+static const struct flow_cache_ops xfrm_policy_fc_ops = {
+	.get = xfrm_policy_flo_get,
+	.check = xfrm_policy_flo_check,
+	.delete = xfrm_policy_flo_delete,
+};
 
 /* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2
  * SPD calls.
@@ -236,6 +265,7 @@ struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp)
 		atomic_set(&policy->refcnt, 1);
 		setup_timer(&policy->timer, xfrm_policy_timer,
 				(unsigned long)policy);
+		policy->flo.ops = &xfrm_policy_fc_ops;
 	}
 	return policy;
 }
@@ -247,8 +277,6 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
 {
 	BUG_ON(!policy->walk.dead);
 
-	BUG_ON(policy->bundles);
-
 	if (del_timer(&policy->timer))
 		BUG();
 
@@ -257,63 +285,20 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
 }
 EXPORT_SYMBOL(xfrm_policy_destroy);
 
-static void xfrm_policy_gc_kill(struct xfrm_policy *policy)
-{
-	struct dst_entry *dst;
-
-	while ((dst = policy->bundles) != NULL) {
-		policy->bundles = dst->next;
-		dst_free(dst);
-	}
-
-	if (del_timer(&policy->timer))
-		atomic_dec(&policy->refcnt);
-
-	if (atomic_read(&policy->refcnt) > 1)
-		flow_cache_flush();
-
-	xfrm_pol_put(policy);
-}
-
-static void xfrm_policy_gc_task(struct work_struct *work)
-{
-	struct xfrm_policy *policy;
-	struct hlist_node *entry, *tmp;
-	struct hlist_head gc_list;
-
-	spin_lock_bh(&xfrm_policy_gc_lock);
-	gc_list.first = xfrm_policy_gc_list.first;
-	INIT_HLIST_HEAD(&xfrm_policy_gc_list);
-	spin_unlock_bh(&xfrm_policy_gc_lock);
-
-	hlist_for_each_entry_safe(policy, entry, tmp, &gc_list, bydst)
-		xfrm_policy_gc_kill(policy);
-}
-static DECLARE_WORK(xfrm_policy_gc_work, xfrm_policy_gc_task);
-
 /* Rule must be locked. Release descentant resources, announce
  * entry dead. The rule must be unlinked from lists to the moment.
  */
 
 static void xfrm_policy_kill(struct xfrm_policy *policy)
 {
-	int dead;
-
-	write_lock_bh(&policy->lock);
-	dead = policy->walk.dead;
 	policy->walk.dead = 1;
-	write_unlock_bh(&policy->lock);
 
-	if (unlikely(dead)) {
-		WARN_ON(1);
-		return;
-	}
+	atomic_inc(&policy->genid);
 
-	spin_lock_bh(&xfrm_policy_gc_lock);
-	hlist_add_head(&policy->bydst, &xfrm_policy_gc_list);
-	spin_unlock_bh(&xfrm_policy_gc_lock);
+	if (del_timer(&policy->timer))
+		xfrm_pol_put(policy);
 
-	schedule_work(&xfrm_policy_gc_work);
+	xfrm_pol_put(policy);
 }
 
 static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024;
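
Taken together, these xfrm_policy.c hunks move bundle ownership out of the policies and into the generic flow cache: instead of each policy carrying a bundles list that a dedicated GC workqueue had to drain, policies (and, further below, bundles) embed a flow_cache_object with get/check/delete callbacks, and the cache revalidates entries lazily. That is why xfrm_policy_kill() shrinks to marking the policy dead, bumping its genid and dropping references, and why the flow_cache_genid increments scattered through the old code disappear in the following hunks.
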
@@ -555,7 +540,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
555 struct xfrm_policy *delpol; 540 struct xfrm_policy *delpol;
556 struct hlist_head *chain; 541 struct hlist_head *chain;
557 struct hlist_node *entry, *newpos; 542 struct hlist_node *entry, *newpos;
558 struct dst_entry *gc_list;
559 u32 mark = policy->mark.v & policy->mark.m; 543 u32 mark = policy->mark.v & policy->mark.m;
560 544
561 write_lock_bh(&xfrm_policy_lock); 545 write_lock_bh(&xfrm_policy_lock);
@@ -605,34 +589,6 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
605 else if (xfrm_bydst_should_resize(net, dir, NULL)) 589 else if (xfrm_bydst_should_resize(net, dir, NULL))
606 schedule_work(&net->xfrm.policy_hash_work); 590 schedule_work(&net->xfrm.policy_hash_work);
607 591
608 read_lock_bh(&xfrm_policy_lock);
609 gc_list = NULL;
610 entry = &policy->bydst;
611 hlist_for_each_entry_continue(policy, entry, bydst) {
612 struct dst_entry *dst;
613
614 write_lock(&policy->lock);
615 dst = policy->bundles;
616 if (dst) {
617 struct dst_entry *tail = dst;
618 while (tail->next)
619 tail = tail->next;
620 tail->next = gc_list;
621 gc_list = dst;
622
623 policy->bundles = NULL;
624 }
625 write_unlock(&policy->lock);
626 }
627 read_unlock_bh(&xfrm_policy_lock);
628
629 while (gc_list) {
630 struct dst_entry *dst = gc_list;
631
632 gc_list = dst->next;
633 dst_free(dst);
634 }
635
636 return 0; 592 return 0;
637} 593}
638EXPORT_SYMBOL(xfrm_policy_insert); 594EXPORT_SYMBOL(xfrm_policy_insert);
@@ -671,10 +627,8 @@ struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type,
671 } 627 }
672 write_unlock_bh(&xfrm_policy_lock); 628 write_unlock_bh(&xfrm_policy_lock);
673 629
674 if (ret && delete) { 630 if (ret && delete)
675 atomic_inc(&flow_cache_genid);
676 xfrm_policy_kill(ret); 631 xfrm_policy_kill(ret);
677 }
678 return ret; 632 return ret;
679} 633}
680EXPORT_SYMBOL(xfrm_policy_bysel_ctx); 634EXPORT_SYMBOL(xfrm_policy_bysel_ctx);
@@ -713,10 +667,8 @@ struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type,
713 } 667 }
714 write_unlock_bh(&xfrm_policy_lock); 668 write_unlock_bh(&xfrm_policy_lock);
715 669
716 if (ret && delete) { 670 if (ret && delete)
717 atomic_inc(&flow_cache_genid);
718 xfrm_policy_kill(ret); 671 xfrm_policy_kill(ret);
719 }
720 return ret; 672 return ret;
721} 673}
722EXPORT_SYMBOL(xfrm_policy_byid); 674EXPORT_SYMBOL(xfrm_policy_byid);
@@ -776,7 +728,6 @@ xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audi
776int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 728int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
777{ 729{
778 int dir, err = 0, cnt = 0; 730 int dir, err = 0, cnt = 0;
779 struct xfrm_policy *dp;
780 731
781 write_lock_bh(&xfrm_policy_lock); 732 write_lock_bh(&xfrm_policy_lock);
782 733
@@ -794,10 +745,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
794 &net->xfrm.policy_inexact[dir], bydst) { 745 &net->xfrm.policy_inexact[dir], bydst) {
795 if (pol->type != type) 746 if (pol->type != type)
796 continue; 747 continue;
797 dp = __xfrm_policy_unlink(pol, dir); 748 __xfrm_policy_unlink(pol, dir);
798 write_unlock_bh(&xfrm_policy_lock); 749 write_unlock_bh(&xfrm_policy_lock);
799 if (dp) 750 cnt++;
800 cnt++;
801 751
802 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 752 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid,
803 audit_info->sessionid, 753 audit_info->sessionid,
@@ -816,10 +766,9 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
816 bydst) { 766 bydst) {
817 if (pol->type != type) 767 if (pol->type != type)
818 continue; 768 continue;
819 dp = __xfrm_policy_unlink(pol, dir); 769 __xfrm_policy_unlink(pol, dir);
820 write_unlock_bh(&xfrm_policy_lock); 770 write_unlock_bh(&xfrm_policy_lock);
821 if (dp) 771 cnt++;
822 cnt++;
823 772
824 xfrm_audit_policy_delete(pol, 1, 773 xfrm_audit_policy_delete(pol, 1,
825 audit_info->loginuid, 774 audit_info->loginuid,
@@ -835,7 +784,6 @@ int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info)
835 } 784 }
836 if (!cnt) 785 if (!cnt)
837 err = -ESRCH; 786 err = -ESRCH;
838 atomic_inc(&flow_cache_genid);
839out: 787out:
840 write_unlock_bh(&xfrm_policy_lock); 788 write_unlock_bh(&xfrm_policy_lock);
841 return err; 789 return err;
@@ -989,32 +937,37 @@ fail:
989 return ret; 937 return ret;
990} 938}
991 939
992static int xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, 940static struct xfrm_policy *
993 u8 dir, void **objp, atomic_t **obj_refp) 941__xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir)
994{ 942{
943#ifdef CONFIG_XFRM_SUB_POLICY
995 struct xfrm_policy *pol; 944 struct xfrm_policy *pol;
996 int err = 0;
997 945
998#ifdef CONFIG_XFRM_SUB_POLICY
999 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); 946 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir);
1000 if (IS_ERR(pol)) { 947 if (pol != NULL)
1001 err = PTR_ERR(pol); 948 return pol;
1002 pol = NULL;
1003 }
1004 if (pol || err)
1005 goto end;
1006#endif
1007 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1008 if (IS_ERR(pol)) {
1009 err = PTR_ERR(pol);
1010 pol = NULL;
1011 }
1012#ifdef CONFIG_XFRM_SUB_POLICY
1013end:
1014#endif 949#endif
1015 if ((*objp = (void *) pol) != NULL) 950 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir);
1016 *obj_refp = &pol->refcnt; 951}
1017 return err; 952
953static struct flow_cache_object *
954xfrm_policy_lookup(struct net *net, struct flowi *fl, u16 family,
955 u8 dir, struct flow_cache_object *old_obj, void *ctx)
956{
957 struct xfrm_policy *pol;
958
959 if (old_obj)
960 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo));
961
962 pol = __xfrm_policy_lookup(net, fl, family, dir);
963 if (IS_ERR_OR_NULL(pol))
964 return ERR_CAST(pol);
965
966 /* Resolver returns two references:
967 * one for cache and one for caller of flow_cache_lookup() */
968 xfrm_pol_hold(pol);
969
970 return &pol->flo;
1018} 971}
1019 972
1020static inline int policy_to_flow_dir(int dir) 973static inline int policy_to_flow_dir(int dir)
@@ -1104,8 +1057,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir)
1104 pol = __xfrm_policy_unlink(pol, dir); 1057 pol = __xfrm_policy_unlink(pol, dir);
1105 write_unlock_bh(&xfrm_policy_lock); 1058 write_unlock_bh(&xfrm_policy_lock);
1106 if (pol) { 1059 if (pol) {
1107 if (dir < XFRM_POLICY_MAX)
1108 atomic_inc(&flow_cache_genid);
1109 xfrm_policy_kill(pol); 1060 xfrm_policy_kill(pol);
1110 return 0; 1061 return 0;
1111 } 1062 }
@@ -1132,6 +1083,9 @@ int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol)
1132 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); 1083 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir);
1133 } 1084 }
1134 if (old_pol) 1085 if (old_pol)
 1086 /* Unlinking always succeeds. This is the only function
 1087 * allowed to delete or replace a socket policy.
 1088 */
1135 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); 1089 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir);
1136 write_unlock_bh(&xfrm_policy_lock); 1090 write_unlock_bh(&xfrm_policy_lock);
1137 1091
@@ -1300,18 +1254,6 @@ xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, struct flowi *fl,
1300 * still valid. 1254 * still valid.
1301 */ 1255 */
1302 1256
1303static struct dst_entry *
1304xfrm_find_bundle(struct flowi *fl, struct xfrm_policy *policy, unsigned short family)
1305{
1306 struct dst_entry *x;
1307 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
1308 if (unlikely(afinfo == NULL))
1309 return ERR_PTR(-EINVAL);
1310 x = afinfo->find_bundle(fl, policy);
1311 xfrm_policy_put_afinfo(afinfo);
1312 return x;
1313}
1314
1315static inline int xfrm_get_tos(struct flowi *fl, int family) 1257static inline int xfrm_get_tos(struct flowi *fl, int family)
1316{ 1258{
1317 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1259 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -1327,6 +1269,54 @@ static inline int xfrm_get_tos(struct flowi *fl, int family)
1327 return tos; 1269 return tos;
1328} 1270}
1329 1271
1272static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo)
1273{
1274 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1275 struct dst_entry *dst = &xdst->u.dst;
1276
1277 if (xdst->route == NULL) {
 1278 /* Dummy bundle - if it has xfrms, we were not
 1279 * able to build a bundle because template resolution
 1280 * failed, so resolution needs to be retried. */
1281 if (xdst->num_xfrms > 0)
1282 return NULL;
1283 } else {
1284 /* Real bundle */
1285 if (stale_bundle(dst))
1286 return NULL;
1287 }
1288
1289 dst_hold(dst);
1290 return flo;
1291}
1292
1293static int xfrm_bundle_flo_check(struct flow_cache_object *flo)
1294{
1295 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1296 struct dst_entry *dst = &xdst->u.dst;
1297
1298 if (!xdst->route)
1299 return 0;
1300 if (stale_bundle(dst))
1301 return 0;
1302
1303 return 1;
1304}
1305
1306static void xfrm_bundle_flo_delete(struct flow_cache_object *flo)
1307{
1308 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo);
1309 struct dst_entry *dst = &xdst->u.dst;
1310
1311 dst_free(dst);
1312}
1313
1314static const struct flow_cache_ops xfrm_bundle_fc_ops = {
1315 .get = xfrm_bundle_flo_get,
1316 .check = xfrm_bundle_flo_check,
1317 .delete = xfrm_bundle_flo_delete,
1318};
1319
1330static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 1320static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1331{ 1321{
1332 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1322 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family);
@@ -1349,9 +1339,10 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family)
1349 BUG(); 1339 BUG();
1350 } 1340 }
1351 xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); 1341 xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS);
1352
1353 xfrm_policy_put_afinfo(afinfo); 1342 xfrm_policy_put_afinfo(afinfo);
1354 1343
1344 xdst->flo.ops = &xfrm_bundle_fc_ops;
1345
1355 return xdst; 1346 return xdst;
1356} 1347}
1357 1348
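
The new xfrm_bundle_fc_ops table is how the bundle plugs into the generic flow cache: the cache revalidates entries through get/check and disposes of them through delete, without knowing their concrete type. A self-contained sketch of that ops-vtable pattern, with illustrative names rather than the kernel API:

	#include <stdio.h>

	struct cache_obj;

	struct cache_ops {
		struct cache_obj *(*get)(struct cache_obj *);	/* ref it, or NULL if unusable */
		int (*check)(struct cache_obj *);		/* 1 = still valid */
		void (*delete)(struct cache_obj *);		/* cache drops its ref */
	};

	struct cache_obj {
		const struct cache_ops *ops;
		int stale;
	};

	static struct cache_obj *obj_get(struct cache_obj *o)
	{
		return o->stale ? NULL : o;
	}

	static int obj_check(struct cache_obj *o)
	{
		return !o->stale;
	}

	static void obj_delete(struct cache_obj *o)
	{
		(void)o;	/* nothing to free in this sketch */
	}

	static const struct cache_ops obj_ops = {
		.get	= obj_get,
		.check	= obj_check,
		.delete	= obj_delete,
	};

	int main(void)
	{
		struct cache_obj o = { .ops = &obj_ops, .stale = 0 };

		/* generic cache code revalidates before handing the entry out */
		printf("valid=%d\n", o.ops->check(&o) && o.ops->get(&o) != NULL);
		return 0;
	}
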
@@ -1389,6 +1380,7 @@ static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
1389 return err; 1380 return err;
1390} 1381}
1391 1382
1383
1392/* Allocate chain of dst_entry's, attach known xfrm's, calculate 1384/* Allocate chain of dst_entry's, attach known xfrm's, calculate
1393 * all the metrics... Shortly, bundle a bundle. 1385 * all the metrics... Shortly, bundle a bundle.
1394 */ 1386 */
@@ -1452,7 +1444,7 @@ static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy,
1452 dst_hold(dst); 1444 dst_hold(dst);
1453 1445
1454 dst1->xfrm = xfrm[i]; 1446 dst1->xfrm = xfrm[i];
1455 xdst->genid = xfrm[i]->genid; 1447 xdst->xfrm_genid = xfrm[i]->genid;
1456 1448
1457 dst1->obsolete = -1; 1449 dst1->obsolete = -1;
1458 dst1->flags |= DST_HOST; 1450 dst1->flags |= DST_HOST;
@@ -1545,7 +1537,186 @@ xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl)
1545#endif 1537#endif
1546} 1538}
1547 1539
1548static int stale_bundle(struct dst_entry *dst); 1540static int xfrm_expand_policies(struct flowi *fl, u16 family,
1541 struct xfrm_policy **pols,
1542 int *num_pols, int *num_xfrms)
1543{
1544 int i;
1545
1546 if (*num_pols == 0 || !pols[0]) {
1547 *num_pols = 0;
1548 *num_xfrms = 0;
1549 return 0;
1550 }
1551 if (IS_ERR(pols[0]))
1552 return PTR_ERR(pols[0]);
1553
1554 *num_xfrms = pols[0]->xfrm_nr;
1555
1556#ifdef CONFIG_XFRM_SUB_POLICY
1557 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW &&
1558 pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1559 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]),
1560 XFRM_POLICY_TYPE_MAIN,
1561 fl, family,
1562 XFRM_POLICY_OUT);
1563 if (pols[1]) {
1564 if (IS_ERR(pols[1])) {
1565 xfrm_pols_put(pols, *num_pols);
1566 return PTR_ERR(pols[1]);
1567 }
1568 (*num_pols) ++;
1569 (*num_xfrms) += pols[1]->xfrm_nr;
1570 }
1571 }
1572#endif
1573 for (i = 0; i < *num_pols; i++) {
1574 if (pols[i]->action != XFRM_POLICY_ALLOW) {
1575 *num_xfrms = -1;
1576 break;
1577 }
1578 }
1579
1580 return 0;
1581
1582}
1583
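
xfrm_expand_policies() folds the combined verdict into num_xfrms: any blocking policy forces it to -1, zero means the flow passes untransformed, and a positive value is the number of templates to resolve. A small sketch of that encoding, assuming these three outcomes are all the callers distinguish:

	#include <stdio.h>

	/* mirrors the num_xfrms convention used by the lookup paths */
	static const char *verdict(int num_xfrms)
	{
		if (num_xfrms < 0)
			return "block";			/* a policy said BLOCK */
		if (num_xfrms == 0)
			return "pass untransformed";	/* policies, no templates */
		return "transform";			/* resolve templates */
	}

	int main(void)
	{
		printf("%s / %s / %s\n", verdict(-1), verdict(0), verdict(2));
		return 0;
	}
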
1584static struct xfrm_dst *
1585xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
1586 struct flowi *fl, u16 family,
1587 struct dst_entry *dst_orig)
1588{
1589 struct net *net = xp_net(pols[0]);
1590 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1591 struct dst_entry *dst;
1592 struct xfrm_dst *xdst;
1593 int err;
1594
1595 /* Try to instantiate a bundle */
1596 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family);
1597 if (err < 0) {
1598 if (err != -EAGAIN)
1599 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1600 return ERR_PTR(err);
1601 }
1602
1603 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig);
1604 if (IS_ERR(dst)) {
1605 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1606 return ERR_CAST(dst);
1607 }
1608
1609 xdst = (struct xfrm_dst *)dst;
1610 xdst->num_xfrms = err;
1611 if (num_pols > 1)
1612 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1613 else
1614 err = xfrm_dst_update_origin(dst, fl);
1615 if (unlikely(err)) {
1616 dst_free(dst);
1617 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1618 return ERR_PTR(err);
1619 }
1620
1621 xdst->num_pols = num_pols;
1622 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
1623 xdst->policy_genid = atomic_read(&pols[0]->genid);
1624
1625 return xdst;
1626}
1627
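
Note that xfrm_resolve_and_create_bundle() reports failure through its return pointer, using the ERR_PTR/IS_ERR idiom rather than a separate status argument. A user-space model of that encoding; the macros imitate the kernel's and assume the top 4095 addresses are never valid pointers:

	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO	4095
	#define ERR_PTR(err)	((void *)(long)(err))
	#define PTR_ERR(ptr)	((long)(ptr))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

	static void *make_bundle(int fail)
	{
		static int bundle = 42;

		return fail ? ERR_PTR(-EAGAIN) : (void *)&bundle;
	}

	int main(void)
	{
		void *b = make_bundle(1);

		if (IS_ERR(b))
			printf("no bundle, err=%ld\n", PTR_ERR(b));	/* -EAGAIN */
		return 0;
	}
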
1628static struct flow_cache_object *
1629xfrm_bundle_lookup(struct net *net, struct flowi *fl, u16 family, u8 dir,
1630 struct flow_cache_object *oldflo, void *ctx)
1631{
1632 struct dst_entry *dst_orig = (struct dst_entry *)ctx;
1633 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1634 struct xfrm_dst *xdst, *new_xdst;
1635 int num_pols = 0, num_xfrms = 0, i, err, pol_dead;
1636
1637 /* Check if the policies from old bundle are usable */
1638 xdst = NULL;
1639 if (oldflo) {
1640 xdst = container_of(oldflo, struct xfrm_dst, flo);
1641 num_pols = xdst->num_pols;
1642 num_xfrms = xdst->num_xfrms;
1643 pol_dead = 0;
1644 for (i = 0; i < num_pols; i++) {
1645 pols[i] = xdst->pols[i];
1646 pol_dead |= pols[i]->walk.dead;
1647 }
1648 if (pol_dead) {
1649 dst_free(&xdst->u.dst);
1650 xdst = NULL;
1651 num_pols = 0;
1652 num_xfrms = 0;
1653 oldflo = NULL;
1654 }
1655 }
1656
 1657 /* Resolve policies to use if we couldn't get them from
 1658 * the previous cache entry */
1659 if (xdst == NULL) {
1660 num_pols = 1;
1661 pols[0] = __xfrm_policy_lookup(net, fl, family, dir);
1662 err = xfrm_expand_policies(fl, family, pols,
1663 &num_pols, &num_xfrms);
1664 if (err < 0)
1665 goto inc_error;
1666 if (num_pols == 0)
1667 return NULL;
1668 if (num_xfrms <= 0)
1669 goto make_dummy_bundle;
1670 }
1671
1672 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig);
1673 if (IS_ERR(new_xdst)) {
1674 err = PTR_ERR(new_xdst);
1675 if (err != -EAGAIN)
1676 goto error;
1677 if (oldflo == NULL)
1678 goto make_dummy_bundle;
1679 dst_hold(&xdst->u.dst);
1680 return oldflo;
1681 }
1682
1683 /* Kill the previous bundle */
1684 if (xdst) {
 1685 /* The policies were stolen for the newly generated bundle */
1686 xdst->num_pols = 0;
1687 dst_free(&xdst->u.dst);
1688 }
1689
 1690 /* The flow cache does not hold a reference; it dst_free()'s,
 1691 * but we do need to return one reference for the original caller */
1692 dst_hold(&new_xdst->u.dst);
1693 return &new_xdst->flo;
1694
1695make_dummy_bundle:
 1696 /* We found policies, but there are no bundles to instantiate:
 1697 * either the policy blocks, has no transformations, or
 1698 * we could not build the template (no xfrm_states). */
1699 xdst = xfrm_alloc_dst(net, family);
1700 if (IS_ERR(xdst)) {
1701 xfrm_pols_put(pols, num_pols);
1702 return ERR_CAST(xdst);
1703 }
1704 xdst->num_pols = num_pols;
1705 xdst->num_xfrms = num_xfrms;
1706 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
1707
1708 dst_hold(&xdst->u.dst);
1709 return &xdst->flo;
1710
1711inc_error:
1712 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1713error:
1714 if (xdst != NULL)
1715 dst_free(&xdst->u.dst);
1716 else
1717 xfrm_pols_put(pols, num_pols);
1718 return ERR_PTR(err);
1719}
1549 1720
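
A key design point above is that xfrm_bundle_lookup() may cache a dummy bundle (xdst->route == NULL), so repeated lookups of a blocked, transform-free, or not-yet-resolvable flow do not redo policy resolution. xfrm_bundle_flo_get() then decides from num_xfrms whether the cached verdict still stands. A compact sketch of that decision, with made-up names:

	#include <stdio.h>

	/* 1 = reuse the cached entry, 0 = drop it and resolve again */
	static int bundle_reusable(int have_route, int num_xfrms)
	{
		if (have_route)
			return 1;	/* real bundle; staleness is checked separately */
		return num_xfrms <= 0;	/* cached block/bypass verdict stays valid */
	}

	int main(void)
	{
		printf("%d\n", bundle_reusable(0, 2));	/* 0: templates missing, retry */
		printf("%d\n", bundle_reusable(0, 0));	/* 1: cached bypass verdict */
		return 0;
	}
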
1550/* Main function: finds/creates a bundle for given flow. 1721/* Main function: finds/creates a bundle for given flow.
1551 * 1722 *
@@ -1555,245 +1726,152 @@ static int stale_bundle(struct dst_entry *dst);
1555int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl, 1726int __xfrm_lookup(struct net *net, struct dst_entry **dst_p, struct flowi *fl,
1556 struct sock *sk, int flags) 1727 struct sock *sk, int flags)
1557{ 1728{
1558 struct xfrm_policy *policy;
1559 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1729 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1560 int npols; 1730 struct flow_cache_object *flo;
1561 int pol_dead; 1731 struct xfrm_dst *xdst;
1562 int xfrm_nr; 1732 struct dst_entry *dst, *dst_orig = *dst_p, *route;
1563 int pi; 1733 u16 family = dst_orig->ops->family;
1564 struct xfrm_state *xfrm[XFRM_MAX_DEPTH];
1565 struct dst_entry *dst, *dst_orig = *dst_p;
1566 int nx = 0;
1567 int err;
1568 u32 genid;
1569 u16 family;
1570 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); 1734 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT);
1735 int i, err, num_pols, num_xfrms, drop_pols = 0;
1571 1736
1572restart: 1737restart:
1573 genid = atomic_read(&flow_cache_genid); 1738 dst = NULL;
1574 policy = NULL; 1739 xdst = NULL;
1575 for (pi = 0; pi < ARRAY_SIZE(pols); pi++) 1740 route = NULL;
1576 pols[pi] = NULL;
1577 npols = 0;
1578 pol_dead = 0;
1579 xfrm_nr = 0;
1580 1741
1581 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 1742 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) {
1582 policy = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1743 num_pols = 1;
1583 err = PTR_ERR(policy); 1744 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl);
1584 if (IS_ERR(policy)) { 1745 err = xfrm_expand_policies(fl, family, pols,
1585 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1746 &num_pols, &num_xfrms);
1747 if (err < 0)
1586 goto dropdst; 1748 goto dropdst;
1749
1750 if (num_pols) {
1751 if (num_xfrms <= 0) {
1752 drop_pols = num_pols;
1753 goto no_transform;
1754 }
1755
1756 xdst = xfrm_resolve_and_create_bundle(
1757 pols, num_pols, fl,
1758 family, dst_orig);
1759 if (IS_ERR(xdst)) {
1760 xfrm_pols_put(pols, num_pols);
1761 err = PTR_ERR(xdst);
1762 goto dropdst;
1763 }
1764
1765 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
1766 xdst->u.dst.next = xfrm_policy_sk_bundles;
1767 xfrm_policy_sk_bundles = &xdst->u.dst;
1768 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
1769
1770 route = xdst->route;
1587 } 1771 }
1588 } 1772 }
1589 1773
1590 if (!policy) { 1774 if (xdst == NULL) {
1591 /* To accelerate a bit... */ 1775 /* To accelerate a bit... */
1592 if ((dst_orig->flags & DST_NOXFRM) || 1776 if ((dst_orig->flags & DST_NOXFRM) ||
1593 !net->xfrm.policy_count[XFRM_POLICY_OUT]) 1777 !net->xfrm.policy_count[XFRM_POLICY_OUT])
1594 goto nopol; 1778 goto nopol;
1595 1779
1596 policy = flow_cache_lookup(net, fl, dst_orig->ops->family, 1780 flo = flow_cache_lookup(net, fl, family, dir,
1597 dir, xfrm_policy_lookup); 1781 xfrm_bundle_lookup, dst_orig);
1598 err = PTR_ERR(policy); 1782 if (flo == NULL)
1599 if (IS_ERR(policy)) { 1783 goto nopol;
1600 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1784 if (IS_ERR(flo)) {
1785 err = PTR_ERR(flo);
1601 goto dropdst; 1786 goto dropdst;
1602 } 1787 }
1788 xdst = container_of(flo, struct xfrm_dst, flo);
1789
1790 num_pols = xdst->num_pols;
1791 num_xfrms = xdst->num_xfrms;
1792 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols);
1793 route = xdst->route;
1794 }
1795
1796 dst = &xdst->u.dst;
1797 if (route == NULL && num_xfrms > 0) {
 1798 /* The only case in which xfrm_bundle_lookup() returns a
 1799 * bundle with a null route is when the template could
 1800 * not be resolved. Policies are there, but the bundle
 1801 * could not be created, since we don't yet have the
 1802 * xfrm_states. We need to wait for the KM to negotiate
 1803 * new SAs or bail out with an error. */
1804 if (net->xfrm.sysctl_larval_drop) {
1805 /* EREMOTE tells the caller to generate
1806 * a one-shot blackhole route. */
1807 dst_release(dst);
1808 xfrm_pols_put(pols, num_pols);
1809 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1810 return -EREMOTE;
1811 }
1812 if (flags & XFRM_LOOKUP_WAIT) {
1813 DECLARE_WAITQUEUE(wait, current);
1814
1815 add_wait_queue(&net->xfrm.km_waitq, &wait);
1816 set_current_state(TASK_INTERRUPTIBLE);
1817 schedule();
1818 set_current_state(TASK_RUNNING);
1819 remove_wait_queue(&net->xfrm.km_waitq, &wait);
1820
1821 if (!signal_pending(current)) {
1822 dst_release(dst);
1823 goto restart;
1824 }
1825
1826 err = -ERESTART;
1827 } else
1828 err = -EAGAIN;
1829
1830 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1831 goto error;
1603 } 1832 }
1604 1833
1605 if (!policy) 1834no_transform:
1835 if (num_pols == 0)
1606 goto nopol; 1836 goto nopol;
1607 1837
1608 family = dst_orig->ops->family; 1838 if ((flags & XFRM_LOOKUP_ICMP) &&
1609 pols[0] = policy; 1839 !(pols[0]->flags & XFRM_POLICY_ICMP)) {
1610 npols ++; 1840 err = -ENOENT;
1611 xfrm_nr += pols[0]->xfrm_nr;
1612
1613 err = -ENOENT;
1614 if ((flags & XFRM_LOOKUP_ICMP) && !(policy->flags & XFRM_POLICY_ICMP))
1615 goto error; 1841 goto error;
1842 }
1616 1843
1617 policy->curlft.use_time = get_seconds(); 1844 for (i = 0; i < num_pols; i++)
1845 pols[i]->curlft.use_time = get_seconds();
1618 1846
1619 switch (policy->action) { 1847 if (num_xfrms < 0) {
1620 default:
1621 case XFRM_POLICY_BLOCK:
1622 /* Prohibit the flow */ 1848 /* Prohibit the flow */
1623 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); 1849 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
1624 err = -EPERM; 1850 err = -EPERM;
1625 goto error; 1851 goto error;
1626 1852 } else if (num_xfrms > 0) {
1627 case XFRM_POLICY_ALLOW: 1853 /* Flow transformed */
1628#ifndef CONFIG_XFRM_SUB_POLICY 1854 *dst_p = dst;
1629 if (policy->xfrm_nr == 0) { 1855 dst_release(dst_orig);
1630 /* Flow passes not transformed. */ 1856 } else {
1631 xfrm_pol_put(policy); 1857 /* Flow passes untransformed */
1632 return 0; 1858 dst_release(dst);
1633 }
1634#endif
1635
1636 /* Try to find matching bundle.
1637 *
1638 * LATER: help from flow cache. It is optional, this
1639 * is required only for output policy.
1640 */
1641 dst = xfrm_find_bundle(fl, policy, family);
1642 if (IS_ERR(dst)) {
1643 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1644 err = PTR_ERR(dst);
1645 goto error;
1646 }
1647
1648 if (dst)
1649 break;
1650
1651#ifdef CONFIG_XFRM_SUB_POLICY
1652 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) {
1653 pols[1] = xfrm_policy_lookup_bytype(net,
1654 XFRM_POLICY_TYPE_MAIN,
1655 fl, family,
1656 XFRM_POLICY_OUT);
1657 if (pols[1]) {
1658 if (IS_ERR(pols[1])) {
1659 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR);
1660 err = PTR_ERR(pols[1]);
1661 goto error;
1662 }
1663 if (pols[1]->action == XFRM_POLICY_BLOCK) {
1664 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK);
1665 err = -EPERM;
1666 goto error;
1667 }
1668 npols ++;
1669 xfrm_nr += pols[1]->xfrm_nr;
1670 }
1671 }
1672
1673 /*
1674 * Because neither flowi nor bundle information knows about
1675 * transformation template size. On more than one policy usage
1676 * we can realize whether all of them is bypass or not after
1677 * they are searched. See above not-transformed bypass
1678 * is surrounded by non-sub policy configuration, too.
1679 */
1680 if (xfrm_nr == 0) {
1681 /* Flow passes not transformed. */
1682 xfrm_pols_put(pols, npols);
1683 return 0;
1684 }
1685
1686#endif
1687 nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
1688
1689 if (unlikely(nx<0)) {
1690 err = nx;
1691 if (err == -EAGAIN && net->xfrm.sysctl_larval_drop) {
1692 /* EREMOTE tells the caller to generate
1693 * a one-shot blackhole route.
1694 */
1695 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1696 xfrm_pol_put(policy);
1697 return -EREMOTE;
1698 }
1699 if (err == -EAGAIN && (flags & XFRM_LOOKUP_WAIT)) {
1700 DECLARE_WAITQUEUE(wait, current);
1701
1702 add_wait_queue(&net->xfrm.km_waitq, &wait);
1703 set_current_state(TASK_INTERRUPTIBLE);
1704 schedule();
1705 set_current_state(TASK_RUNNING);
1706 remove_wait_queue(&net->xfrm.km_waitq, &wait);
1707
1708 nx = xfrm_tmpl_resolve(pols, npols, fl, xfrm, family);
1709
1710 if (nx == -EAGAIN && signal_pending(current)) {
1711 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1712 err = -ERESTART;
1713 goto error;
1714 }
1715 if (nx == -EAGAIN ||
1716 genid != atomic_read(&flow_cache_genid)) {
1717 xfrm_pols_put(pols, npols);
1718 goto restart;
1719 }
1720 err = nx;
1721 }
1722 if (err < 0) {
1723 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
1724 goto error;
1725 }
1726 }
1727 if (nx == 0) {
1728 /* Flow passes not transformed. */
1729 xfrm_pols_put(pols, npols);
1730 return 0;
1731 }
1732
1733 dst = xfrm_bundle_create(policy, xfrm, nx, fl, dst_orig);
1734 err = PTR_ERR(dst);
1735 if (IS_ERR(dst)) {
1736 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR);
1737 goto error;
1738 }
1739
1740 for (pi = 0; pi < npols; pi++) {
1741 read_lock_bh(&pols[pi]->lock);
1742 pol_dead |= pols[pi]->walk.dead;
1743 read_unlock_bh(&pols[pi]->lock);
1744 }
1745
1746 write_lock_bh(&policy->lock);
1747 if (unlikely(pol_dead || stale_bundle(dst))) {
1748 /* Wow! While we worked on resolving, this
1749 * policy has gone. Retry. It is not paranoia,
1750 * we just cannot enlist new bundle to dead object.
1751 * We can't enlist stable bundles either.
1752 */
1753 write_unlock_bh(&policy->lock);
1754 dst_free(dst);
1755
1756 if (pol_dead)
1757 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLDEAD);
1758 else
1759 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1760 err = -EHOSTUNREACH;
1761 goto error;
1762 }
1763
1764 if (npols > 1)
1765 err = xfrm_dst_update_parent(dst, &pols[1]->selector);
1766 else
1767 err = xfrm_dst_update_origin(dst, fl);
1768 if (unlikely(err)) {
1769 write_unlock_bh(&policy->lock);
1770 dst_free(dst);
1771 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR);
1772 goto error;
1773 }
1774
1775 dst->next = policy->bundles;
1776 policy->bundles = dst;
1777 dst_hold(dst);
1778 write_unlock_bh(&policy->lock);
1779 } 1859 }
1780 *dst_p = dst; 1860ok:
1781 dst_release(dst_orig); 1861 xfrm_pols_put(pols, drop_pols);
1782 xfrm_pols_put(pols, npols);
1783 return 0; 1862 return 0;
1784 1863
1864nopol:
1865 if (!(flags & XFRM_LOOKUP_ICMP))
1866 goto ok;
1867 err = -ENOENT;
1785error: 1868error:
1786 xfrm_pols_put(pols, npols); 1869 dst_release(dst);
1787dropdst: 1870dropdst:
1788 dst_release(dst_orig); 1871 dst_release(dst_orig);
1789 *dst_p = NULL; 1872 *dst_p = NULL;
1873 xfrm_pols_put(pols, drop_pols);
1790 return err; 1874 return err;
1791
1792nopol:
1793 err = -ENOENT;
1794 if (flags & XFRM_LOOKUP_ICMP)
1795 goto dropdst;
1796 return 0;
1797} 1875}
1798EXPORT_SYMBOL(__xfrm_lookup); 1876EXPORT_SYMBOL(__xfrm_lookup);
1799 1877
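
Stripped of locking and cache plumbing, the rewritten __xfrm_lookup() maps the bundle-lookup result onto four outcomes: pass untransformed, use the bundle, block, or wait for the key manager. A user-space model of that classification, with stand-in types:

	#include <stdio.h>
	#include <errno.h>

	struct lookup_res {
		int num_xfrms;	/* <0 block, 0 plain, >0 transforms */
		int have_route;	/* 0 means templates are unresolved */
	};

	/* 1 = use bundle, 0 = pass untransformed, <0 = errno */
	static int classify(struct lookup_res r, int larval_drop)
	{
		if (!r.have_route && r.num_xfrms > 0)
			return larval_drop ? -EREMOTE : -EAGAIN; /* blackhole or wait */
		if (r.num_xfrms < 0)
			return -EPERM;	/* policy blocks the flow */
		return r.num_xfrms > 0;
	}

	int main(void)
	{
		struct lookup_res r = { .num_xfrms = 2, .have_route = 0 };

		printf("verdict=%d\n", classify(r, 1));	/* -EREMOTE: blackhole route */
		return 0;
	}
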
@@ -1952,9 +2030,16 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb,
1952 } 2030 }
1953 } 2031 }
1954 2032
1955 if (!pol) 2033 if (!pol) {
1956 pol = flow_cache_lookup(net, &fl, family, fl_dir, 2034 struct flow_cache_object *flo;
1957 xfrm_policy_lookup); 2035
2036 flo = flow_cache_lookup(net, &fl, family, fl_dir,
2037 xfrm_policy_lookup, NULL);
2038 if (IS_ERR_OR_NULL(flo))
2039 pol = ERR_CAST(flo);
2040 else
2041 pol = container_of(flo, struct xfrm_policy, flo);
2042 }
1958 2043
1959 if (IS_ERR(pol)) { 2044 if (IS_ERR(pol)) {
1960 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2045 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR);
@@ -2138,71 +2223,24 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
2138 return dst; 2223 return dst;
2139} 2224}
2140 2225
2141static void prune_one_bundle(struct xfrm_policy *pol, int (*func)(struct dst_entry *), struct dst_entry **gc_list_p) 2226static void __xfrm_garbage_collect(struct net *net)
2142{
2143 struct dst_entry *dst, **dstp;
2144
2145 write_lock(&pol->lock);
2146 dstp = &pol->bundles;
2147 while ((dst=*dstp) != NULL) {
2148 if (func(dst)) {
2149 *dstp = dst->next;
2150 dst->next = *gc_list_p;
2151 *gc_list_p = dst;
2152 } else {
2153 dstp = &dst->next;
2154 }
2155 }
2156 write_unlock(&pol->lock);
2157}
2158
2159static void xfrm_prune_bundles(struct net *net, int (*func)(struct dst_entry *))
2160{ 2227{
2161 struct dst_entry *gc_list = NULL; 2228 struct dst_entry *head, *next;
2162 int dir;
2163 2229
2164 read_lock_bh(&xfrm_policy_lock); 2230 flow_cache_flush();
2165 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
2166 struct xfrm_policy *pol;
2167 struct hlist_node *entry;
2168 struct hlist_head *table;
2169 int i;
2170 2231
2171 hlist_for_each_entry(pol, entry, 2232 spin_lock_bh(&xfrm_policy_sk_bundle_lock);
2172 &net->xfrm.policy_inexact[dir], bydst) 2233 head = xfrm_policy_sk_bundles;
2173 prune_one_bundle(pol, func, &gc_list); 2234 xfrm_policy_sk_bundles = NULL;
2235 spin_unlock_bh(&xfrm_policy_sk_bundle_lock);
2174 2236
2175 table = net->xfrm.policy_bydst[dir].table; 2237 while (head) {
2176 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 2238 next = head->next;
2177 hlist_for_each_entry(pol, entry, table + i, bydst) 2239 dst_free(head);
2178 prune_one_bundle(pol, func, &gc_list); 2240 head = next;
2179 }
2180 }
2181 read_unlock_bh(&xfrm_policy_lock);
2182
2183 while (gc_list) {
2184 struct dst_entry *dst = gc_list;
2185 gc_list = dst->next;
2186 dst_free(dst);
2187 } 2241 }
2188} 2242}
2189 2243
2190static int unused_bundle(struct dst_entry *dst)
2191{
2192 return !atomic_read(&dst->__refcnt);
2193}
2194
2195static void __xfrm_garbage_collect(struct net *net)
2196{
2197 xfrm_prune_bundles(net, unused_bundle);
2198}
2199
2200static int xfrm_flush_bundles(struct net *net)
2201{
2202 xfrm_prune_bundles(net, stale_bundle);
2203 return 0;
2204}
2205
2206static void xfrm_init_pmtu(struct dst_entry *dst) 2244static void xfrm_init_pmtu(struct dst_entry *dst)
2207{ 2245{
2208 do { 2246 do {
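
__xfrm_garbage_collect() now steals the whole per-socket bundle list under the spinlock and frees it with the lock dropped, replacing the old walk over every policy's bundle chain. A pthread-based sketch of that detach-then-free pattern; the mutex stands in for xfrm_policy_sk_bundle_lock and the types are illustrative:

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *next; };

	static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct node *pending;

	static void gc_all(void)
	{
		struct node *head, *next;

		pthread_mutex_lock(&list_lock);
		head = pending;		/* splice the list out under the lock... */
		pending = NULL;
		pthread_mutex_unlock(&list_lock);

		while (head) {		/* ...then free it without the lock held */
			next = head->next;
			free(head);
			head = next;
		}
	}

	int main(void)
	{
		struct node *n = calloc(1, sizeof(*n));

		pthread_mutex_lock(&list_lock);
		n->next = pending;
		pending = n;
		pthread_mutex_unlock(&list_lock);

		gc_all();
		printf("list drained: %s\n", pending ? "no" : "yes");
		return 0;
	}

Keeping the free() calls outside the lock keeps the critical section tiny, which matters because the NETDEV_DOWN notifier now funnels through this same path.
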
@@ -2260,7 +2298,9 @@ int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *first,
2260 return 0; 2298 return 0;
2261 if (dst->xfrm->km.state != XFRM_STATE_VALID) 2299 if (dst->xfrm->km.state != XFRM_STATE_VALID)
2262 return 0; 2300 return 0;
2263 if (xdst->genid != dst->xfrm->genid) 2301 if (xdst->xfrm_genid != dst->xfrm->genid)
2302 return 0;
2303 if (xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
2264 return 0; 2304 return 0;
2265 2305
2266 if (strict && fl && 2306 if (strict && fl &&
@@ -2425,7 +2465,7 @@ static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void
2425 2465
2426 switch (event) { 2466 switch (event) {
2427 case NETDEV_DOWN: 2467 case NETDEV_DOWN:
2428 xfrm_flush_bundles(dev_net(dev)); 2468 __xfrm_garbage_collect(dev_net(dev));
2429 } 2469 }
2430 return NOTIFY_DONE; 2470 return NOTIFY_DONE;
2431} 2471}
@@ -2531,7 +2571,6 @@ static void xfrm_policy_fini(struct net *net)
2531 audit_info.sessionid = -1; 2571 audit_info.sessionid = -1;
2532 audit_info.secid = 0; 2572 audit_info.secid = 0;
2533 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info); 2573 xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);
2534 flush_work(&xfrm_policy_gc_work);
2535 2574
2536 WARN_ON(!list_empty(&net->xfrm.policy_all)); 2575 WARN_ON(!list_empty(&net->xfrm.policy_all));
2537 2576
@@ -2757,7 +2796,6 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
2757 struct xfrm_migrate *m, int num_migrate) 2796 struct xfrm_migrate *m, int num_migrate)
2758{ 2797{
2759 struct xfrm_migrate *mp; 2798 struct xfrm_migrate *mp;
2760 struct dst_entry *dst;
2761 int i, j, n = 0; 2799 int i, j, n = 0;
2762 2800
2763 write_lock_bh(&pol->lock); 2801 write_lock_bh(&pol->lock);
@@ -2782,10 +2820,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
2782 sizeof(pol->xfrm_vec[i].saddr)); 2820 sizeof(pol->xfrm_vec[i].saddr));
2783 pol->xfrm_vec[i].encap_family = mp->new_family; 2821 pol->xfrm_vec[i].encap_family = mp->new_family;
2784 /* flush bundles */ 2822 /* flush bundles */
2785 while ((dst = pol->bundles) != NULL) { 2823 atomic_inc(&pol->genid);
2786 pol->bundles = dst->next;
2787 dst_free(dst);
2788 }
2789 } 2824 }
2790 } 2825 }
2791 2826
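
The xfrm_bundle_ok() and xfrm_policy_migrate() hunks above are two halves of one mechanism: generation-counter invalidation. A bundle snapshots each ingredient's genid when built (xfrm_genid, policy_genid); migration just bumps pol->genid instead of freeing bundles by hand, and xfrm_bundle_ok() then fails the comparison so the bundle is rebuilt lazily. A minimal sketch:

	#include <stdio.h>

	struct policy { int genid; };
	struct bundle { int policy_genid; };

	static int bundle_ok(const struct bundle *b, const struct policy *p)
	{
		return b->policy_genid == p->genid;
	}

	int main(void)
	{
		struct policy p = { .genid = 7 };
		struct bundle b = { .policy_genid = p.genid };

		printf("before migrate: ok=%d\n", bundle_ok(&b, &p));	/* 1 */
		p.genid++;			/* what xfrm_policy_migrate() does */
		printf("after migrate:  ok=%d\n", bundle_ok(&b, &p));	/* 0 */
		return 0;
	}

The xfrm_state.c hunks below apply the same idea per state: x->genid++ on collision replaces the old global xfrm_state_genid counter.
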
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index add77ecb8ac4..5208b12fbfb4 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -38,7 +38,6 @@
38static DEFINE_SPINLOCK(xfrm_state_lock); 38static DEFINE_SPINLOCK(xfrm_state_lock);
39 39
40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024; 40static unsigned int xfrm_state_hashmax __read_mostly = 1 * 1024 * 1024;
41static unsigned int xfrm_state_genid;
42 41
43static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family); 42static struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
44static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo); 43static void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
@@ -924,8 +923,6 @@ static void __xfrm_state_insert(struct xfrm_state *x)
924 struct net *net = xs_net(x); 923 struct net *net = xs_net(x);
925 unsigned int h; 924 unsigned int h;
926 925
927 x->genid = ++xfrm_state_genid;
928
929 list_add(&x->km.all, &net->xfrm.state_all); 926 list_add(&x->km.all, &net->xfrm.state_all);
930 927
931 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr, 928 h = xfrm_dst_hash(net, &x->id.daddr, &x->props.saddr,
@@ -971,7 +968,7 @@ static void __xfrm_state_bump_genids(struct xfrm_state *xnew)
971 (mark & x->mark.m) == x->mark.v && 968 (mark & x->mark.m) == x->mark.v &&
972 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) && 969 !xfrm_addr_cmp(&x->id.daddr, &xnew->id.daddr, family) &&
973 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family)) 970 !xfrm_addr_cmp(&x->props.saddr, &xnew->props.saddr, family))
974 x->genid = xfrm_state_genid; 971 x->genid++;
975 } 972 }
976} 973}
977 974
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 6106b72826d3..a267fbdda525 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -1741,6 +1741,10 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1741 if (err) 1741 if (err)
1742 return err; 1742 return err;
1743 1743
1744 err = verify_policy_dir(p->dir);
1745 if (err)
1746 return err;
1747
1744 if (p->index) 1748 if (p->index)
1745 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err); 1749 xp = xfrm_policy_byid(net, mark, type, p->dir, p->index, 0, &err);
1746 else { 1750 else {
@@ -1766,13 +1770,9 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
1766 if (xp == NULL) 1770 if (xp == NULL)
1767 return -ENOENT; 1771 return -ENOENT;
1768 1772
1769 read_lock(&xp->lock); 1773 if (unlikely(xp->walk.dead))
1770 if (xp->walk.dead) {
1771 read_unlock(&xp->lock);
1772 goto out; 1774 goto out;
1773 }
1774 1775
1775 read_unlock(&xp->lock);
1776 err = 0; 1776 err = 0;
1777 if (up->hard) { 1777 if (up->hard) {
1778 uid_t loginuid = NETLINK_CB(skb).loginuid; 1778 uid_t loginuid = NETLINK_CB(skb).loginuid;
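
The added verify_policy_dir() call rejects a userland-supplied direction before it is used to select per-direction state. A hedged sketch of that kind of bounds check; the constant is a stand-in for the uapi value, assuming directions form a small dense range:

	#include <errno.h>
	#include <stdio.h>

	#define POLICY_DIR_MAX	3	/* stand-in: in, out, fwd */

	static int verify_dir(unsigned char dir)
	{
		return dir >= POLICY_DIR_MAX ? -EINVAL : 0;
	}

	int main(void)
	{
		printf("dir=2 -> %d\n", verify_dir(2));	/* accepted */
		printf("dir=9 -> %d\n", verify_dir(9));	/* -EINVAL */
		return 0;
	}
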
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 220213e603db..36a60a853173 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -796,6 +796,28 @@ static int do_platform_entry(const char *filename,
796 return 1; 796 return 1;
797} 797}
798 798
799static int do_mdio_entry(const char *filename,
800 struct mdio_device_id *id, char *alias)
801{
802 int i;
803
804 alias += sprintf(alias, MDIO_MODULE_PREFIX);
805
806 for (i = 0; i < 32; i++) {
807 if (!((id->phy_id_mask >> (31-i)) & 1))
808 *(alias++) = '?';
809 else if ((id->phy_id >> (31-i)) & 1)
810 *(alias++) = '1';
811 else
812 *(alias++) = '0';
813 }
814
815 /* Terminate the string */
816 *alias = 0;
817
818 return 1;
819}
820
799/* Ignore any prefix, eg. some architectures prepend _ */ 821/* Ignore any prefix, eg. some architectures prepend _ */
800static inline int sym_is(const char *symbol, const char *name) 822static inline int sym_is(const char *symbol, const char *name)
801{ 823{
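
do_mdio_entry() turns a (phy_id, phy_id_mask) pair into a 32-character match pattern, most significant bit first, emitting '?' wherever the mask bit is clear. A stand-alone rendering of the same loop with made-up sample values; the "mdio:" prefix mirrors MDIO_MODULE_PREFIX:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t phy_id = 0x001cc910;	/* hypothetical PHY id */
		uint32_t mask   = 0xfffffff0;	/* low nibble: revision, wildcarded */
		char alias[33];
		int i;

		for (i = 0; i < 32; i++) {
			if (!((mask >> (31 - i)) & 1))
				alias[i] = '?';
			else
				alias[i] = ((phy_id >> (31 - i)) & 1) ? '1' : '0';
		}
		alias[32] = '\0';

		printf("mdio:%s\n", alias);	/* 28 fixed bits, 4 wildcards */
		return 0;
	}

This is what lets module tooling match a PHY driver from the device's MDIO modalias, analogous to the other device-table entries handled in this file.
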
@@ -943,6 +965,10 @@ void handle_moddevtable(struct module *mod, struct elf_info *info,
943 do_table(symval, sym->st_size, 965 do_table(symval, sym->st_size,
944 sizeof(struct platform_device_id), "platform", 966 sizeof(struct platform_device_id), "platform",
945 do_platform_entry, mod); 967 do_platform_entry, mod);
968 else if (sym_is(symname, "__mod_mdio_device_table"))
969 do_table(symval, sym->st_size,
970 sizeof(struct mdio_device_id), "mdio",
971 do_mdio_entry, mod);
946 free(zeros); 972 free(zeros);
947} 973}
948 974